// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include <linux/namei.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"
#include "zoned.h"

#define BTRFS_BLOCK_GROUP_STRIPE_MASK   (BTRFS_BLOCK_GROUP_RAID0 | \
                                         BTRFS_BLOCK_GROUP_RAID10 | \
                                         BTRFS_BLOCK_GROUP_RAID56_MASK)

const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
        [BTRFS_RAID_RAID10] = {
                .sub_stripes = 2,
                .dev_stripes = 1,
                .devs_max = 0, /* 0 == as many as possible */
                .devs_min = 2,
                .tolerated_failures = 1,
                .devs_increment = 2,
                .ncopies = 2,
                .nparity = 0,
                .raid_name = "raid10",
                .bg_flag = BTRFS_BLOCK_GROUP_RAID10,
                .mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
        },
        [BTRFS_RAID_RAID1] = {
                .sub_stripes = 1,
                .dev_stripes = 1,
                .devs_max = 2,
                .devs_min = 2,
                .tolerated_failures = 1,
                .devs_increment = 2,
                .ncopies = 2,
                .nparity = 0,
                .raid_name = "raid1",
                .bg_flag = BTRFS_BLOCK_GROUP_RAID1,
                .mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
        },
        [BTRFS_RAID_RAID1C3] = {
                .sub_stripes = 1,
                .dev_stripes = 1,
                .devs_max = 3,
                .devs_min = 3,
                .tolerated_failures = 2,
                .devs_increment = 3,
                .ncopies = 3,
                .nparity = 0,
                .raid_name = "raid1c3",
                .bg_flag = BTRFS_BLOCK_GROUP_RAID1C3,
                .mindev_error = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
        },
        [BTRFS_RAID_RAID1C4] = {
                .sub_stripes = 1,
                .dev_stripes = 1,
                .devs_max = 4,
                .devs_min = 4,
                .tolerated_failures = 3,
                .devs_increment = 4,
                .ncopies = 4,
                .nparity = 0,
                .raid_name = "raid1c4",
                .bg_flag = BTRFS_BLOCK_GROUP_RAID1C4,
                .mindev_error = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
        },
        [BTRFS_RAID_DUP] = {
                .sub_stripes = 1,
                .dev_stripes = 2,
                .devs_max = 1,
                .devs_min = 1,
                .tolerated_failures = 0,
                .devs_increment = 1,
                .ncopies = 2,
                .nparity = 0,
                .raid_name = "dup",
                .bg_flag = BTRFS_BLOCK_GROUP_DUP,
                .mindev_error = 0,
        },
        [BTRFS_RAID_RAID0] = {
                .sub_stripes = 1,
                .dev_stripes = 1,
                .devs_max = 0,
                .devs_min = 1,
                .tolerated_failures = 0,
                .devs_increment = 1,
                .ncopies = 1,
                .nparity = 0,
                .raid_name = "raid0",
                .bg_flag = BTRFS_BLOCK_GROUP_RAID0,
                .mindev_error = 0,
        },
        [BTRFS_RAID_SINGLE] = {
                .sub_stripes = 1,
                .dev_stripes = 1,
                .devs_max = 1,
                .devs_min = 1,
                .tolerated_failures = 0,
                .devs_increment = 1,
                .ncopies = 1,
                .nparity = 0,
                .raid_name = "single",
                .bg_flag = 0,
                .mindev_error = 0,
        },
        [BTRFS_RAID_RAID5] = {
                .sub_stripes = 1,
                .dev_stripes = 1,
                .devs_max = 0,
                .devs_min = 2,
                .tolerated_failures = 1,
                .devs_increment = 1,
                .ncopies = 1,
                .nparity = 1,
                .raid_name = "raid5",
                .bg_flag = BTRFS_BLOCK_GROUP_RAID5,
                .mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
        },
        [BTRFS_RAID_RAID6] = {
                .sub_stripes = 1,
                .dev_stripes = 1,
                .devs_max = 0,
                .devs_min = 3,
                .tolerated_failures = 2,
                .devs_increment = 1,
                .ncopies = 1,
                .nparity = 2,
                .raid_name = "raid6",
                .bg_flag = BTRFS_BLOCK_GROUP_RAID6,
                .mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
        },
};

/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as an index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
        const u64 profile = (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK);

        if (!profile)
                return BTRFS_RAID_SINGLE;

        return BTRFS_BG_FLAG_TO_INDEX(profile);
}

const char *btrfs_bg_type_to_raid_name(u64 flags)
{
        const int index = btrfs_bg_flags_to_raid_index(flags);

        if (index >= BTRFS_NR_RAID_TYPES)
                return NULL;

        return btrfs_raid_array[index].raid_name;
}
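
/*
 * Illustrative sketch (not part of the original file): how the attribute
 * table and the two helpers above are typically combined.  The profile
 * bits of a chunk type select one btrfs_raid_array entry:
 *
 *      enum btrfs_raid_types index =
 *              btrfs_bg_flags_to_raid_index(BTRFS_BLOCK_GROUP_RAID10);
 *      const struct btrfs_raid_attr *attr = &btrfs_raid_array[index];
 *
 *      pr_debug("profile %s tolerates %d missing device(s)\n",
 *               btrfs_bg_type_to_raid_name(BTRFS_BLOCK_GROUP_RAID10),
 *               attr->tolerated_failures);
 *
 * For RAID10 this prints "raid10" and 1; flags with no profile bit set map
 * to BTRFS_RAID_SINGLE.
 */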

/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including the terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
        int i;
        int ret;
        char *bp = buf;
        u64 flags = bg_flags;
        u32 size_bp = size_buf;

        if (!flags) {
                strcpy(bp, "NONE");
                return;
        }

#define DESCRIBE_FLAG(flag, desc)                               \
        do {                                                    \
                if (flags & (flag)) {                           \
                        ret = snprintf(bp, size_bp, "%s|", (desc)); \
                        if (ret < 0 || ret >= size_bp)          \
                                goto out_overflow;              \
                        size_bp -= ret;                         \
                        bp += ret;                              \
                        flags &= ~(flag);                       \
                }                                               \
        } while (0)

        DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
        DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
        DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

        DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
        for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
                DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
                              btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

        if (flags) {
                ret = snprintf(bp, size_bp, "0x%llx|", flags);
                size_bp -= ret;
        }

        if (size_bp < size_buf)
                buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

        /*
         * The text is trimmed; it's up to the caller to provide a
         * sufficiently large buffer.
         */
out_overflow:;
}
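
/*
 * Example (illustrative, not part of the original code): describing a data
 * block group using the RAID1 profile leaves "data|raid1" in a caller
 * provided buffer; any leftover unknown bits would be appended as a hex
 * value instead of being dropped:
 *
 *      char buf[64];
 *
 *      btrfs_describe_block_groups(BTRFS_BLOCK_GROUP_DATA |
 *                                  BTRFS_BLOCK_GROUP_RAID1,
 *                                  buf, sizeof(buf));
 *
 * A buffer that is too small yields a truncated description, not an error.
 */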

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
                             enum btrfs_map_op op,
                             u64 logical, u64 *length,
                             struct btrfs_io_context **bioc_ret,
                             int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by eg.
 * scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, ie. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of exclusive operation is set and cleared atomically.
 * During the course of Paused state, fs_info::exclusive_operation remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */

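/*
 * A minimal sketch (illustrative only) of the nesting rules documented
 * above: when several of these locks are needed they are acquired in the
 * top-down order shown and released in reverse, e.g. an update of the
 * per-fs device list under the global lock:
 *
 *      mutex_lock(&uuid_mutex);
 *      mutex_lock(&fs_devices->device_list_mutex);
 *      ... modify fs_devices::devices ...
 *      mutex_unlock(&fs_devices->device_list_mutex);
 *      mutex_unlock(&uuid_mutex);
 *
 * Taking uuid_mutex while already holding device_list_mutex would invert
 * this order and risk an ABBA deadlock.
 */
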
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
        return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:          if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
                                                 const u8 *metadata_fsid)
{
        struct btrfs_fs_devices *fs_devs;

        fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
        if (!fs_devs)
                return ERR_PTR(-ENOMEM);

        mutex_init(&fs_devs->device_list_mutex);

        INIT_LIST_HEAD(&fs_devs->devices);
        INIT_LIST_HEAD(&fs_devs->alloc_list);
        INIT_LIST_HEAD(&fs_devs->fs_list);
        INIT_LIST_HEAD(&fs_devs->seed_list);
        if (fsid)
                memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

        if (metadata_fsid)
                memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
        else if (fsid)
                memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

        return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
        WARN_ON(!list_empty(&device->post_commit_list));
        rcu_string_free(device->name);
        extent_io_tree_release(&device->alloc_state);
        btrfs_destroy_dev_zone_info(device);
        kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device;
        WARN_ON(fs_devices->opened);
        while (!list_empty(&fs_devices->devices)) {
                device = list_entry(fs_devices->devices.next,
                                    struct btrfs_device, dev_list);
                list_del(&device->dev_list);
                btrfs_free_device(device);
        }
        kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
        struct btrfs_fs_devices *fs_devices;

        while (!list_empty(&fs_uuids)) {
                fs_devices = list_entry(fs_uuids.next,
                                        struct btrfs_fs_devices, fs_list);
                list_del(&fs_devices->fs_list);
                free_fs_devices(fs_devices);
        }
}

static noinline struct btrfs_fs_devices *find_fsid(
                const u8 *fsid, const u8 *metadata_fsid)
{
        struct btrfs_fs_devices *fs_devices;

        ASSERT(fsid);

        /* Handle non-split brain cases */
        list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
                if (metadata_fsid) {
                        if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
                            && memcmp(metadata_fsid, fs_devices->metadata_uuid,
                                      BTRFS_FSID_SIZE) == 0)
                                return fs_devices;
                } else {
                        if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
                                return fs_devices;
                }
        }
        return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
                                struct btrfs_super_block *disk_super)
{

        struct btrfs_fs_devices *fs_devices;

        /*
         * Handle scanned device having completed its fsid change but
         * belonging to a fs_devices that was created by first scanning
         * a device which didn't have its fsid/metadata_uuid changed
         * at all and the CHANGING_FSID_V2 flag set.
         */
        list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
                if (fs_devices->fsid_change &&
                    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
                           BTRFS_FSID_SIZE) == 0 &&
                    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
                           BTRFS_FSID_SIZE) == 0) {
                        return fs_devices;
                }
        }
        /*
         * Handle scanned device having completed its fsid change but
         * belonging to a fs_devices that was created by a device that
         * has an outdated pair of fsid/metadata_uuid and
         * CHANGING_FSID_V2 flag set.
         */
        list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
                if (fs_devices->fsid_change &&
                    memcmp(fs_devices->metadata_uuid,
                           fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
                    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
                           BTRFS_FSID_SIZE) == 0) {
                        return fs_devices;
                }
        }

        return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}

static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
                      int flush, struct block_device **bdev,
                      struct btrfs_super_block **disk_super)
{
        int ret;

        *bdev = blkdev_get_by_path(device_path, flags, holder);

        if (IS_ERR(*bdev)) {
                ret = PTR_ERR(*bdev);
                goto error;
        }

        if (flush)
                sync_blockdev(*bdev);
        ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
        if (ret) {
                blkdev_put(*bdev, flags);
                goto error;
        }
        invalidate_bdev(*bdev);
        *disk_super = btrfs_read_dev_super(*bdev);
        if (IS_ERR(*disk_super)) {
                ret = PTR_ERR(*disk_super);
                blkdev_put(*bdev, flags);
                goto error;
        }

        return 0;

error:
        *bdev = NULL;
        return ret;
}

/**
 * Search and remove all stale devices (which are not mounted).
 * When both inputs are NULL, it will search and release all stale devices.
 *
 * @devt:        Optional. When provided, it will release all unmounted devices
 *               matching this devt only.
 * @skip_device: Optional. Will skip this device when searching for the stale
 *               devices.
 *
 * Return:      0 for success or if @devt is 0.
 *              -EBUSY if @devt is a mounted device.
 *              -ENOENT if @devt does not match any device in the list.
 */
static int btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{
        struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
        struct btrfs_device *device, *tmp_device;
        int ret = 0;

        lockdep_assert_held(&uuid_mutex);

        if (devt)
                ret = -ENOENT;

        list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

                mutex_lock(&fs_devices->device_list_mutex);
                list_for_each_entry_safe(device, tmp_device,
                                         &fs_devices->devices, dev_list) {
                        if (skip_device && skip_device == device)
                                continue;
                        if (devt && devt != device->devt)
                                continue;
                        if (fs_devices->opened) {
                                /* for an already deleted device return 0 */
                                if (devt && ret != 0)
                                        ret = -EBUSY;
                                break;
                        }

                        /* delete the stale device */
                        fs_devices->num_devices--;
                        list_del(&device->dev_list);
                        btrfs_free_device(device);

                        ret = 0;
                }
                mutex_unlock(&fs_devices->device_list_mutex);

                if (fs_devices->num_devices == 0) {
                        btrfs_sysfs_remove_fsid(fs_devices);
                        list_del(&fs_devices->fs_list);
                        free_fs_devices(fs_devices);
                }
        }

        return ret;
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
                                 struct btrfs_device *device, fmode_t flags,
                                 void *holder)
{
        struct block_device *bdev;
        struct btrfs_super_block *disk_super;
        u64 devid;
        int ret;

        if (device->bdev)
                return -EINVAL;
        if (!device->name)
                return -EINVAL;

        ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
                                    &bdev, &disk_super);
        if (ret)
                return ret;

        devid = btrfs_stack_device_id(&disk_super->dev_item);
        if (devid != device->devid)
                goto error_free_page;

        if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
                goto error_free_page;

        device->generation = btrfs_super_generation(disk_super);

        if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
                if (btrfs_super_incompat_flags(disk_super) &
                    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
                        pr_err(
                "BTRFS: Invalid seeding and uuid-changed device detected\n");
                        goto error_free_page;
                }

                clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
                fs_devices->seeding = true;
        } else {
                if (bdev_read_only(bdev))
                        clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
                else
                        set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
        }

        if (!bdev_nonrot(bdev))
                fs_devices->rotating = true;

        device->bdev = bdev;
        clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
        device->mode = flags;

        fs_devices->open_devices++;
        if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
            device->devid != BTRFS_DEV_REPLACE_DEVID) {
                fs_devices->rw_devices++;
                list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
        }
        btrfs_release_disk_super(disk_super);

        return 0;

error_free_page:
        btrfs_release_disk_super(disk_super);
        blkdev_put(bdev, flags);

        return -EINVAL;
}

/*
 * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
 * being created with a disk that has already completed its fsid change. Such a
 * disk can belong to an fs which has its FSID changed or to one which doesn't.
 * Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
                                struct btrfs_super_block *disk_super)
{
        struct btrfs_fs_devices *fs_devices;

        list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
                if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
                           BTRFS_FSID_SIZE) != 0 &&
                    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
                           BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
                        return fs_devices;
                }
        }

        return find_fsid(disk_super->fsid, NULL);
}

static struct btrfs_fs_devices *find_fsid_changed(
                                struct btrfs_super_block *disk_super)
{
        struct btrfs_fs_devices *fs_devices;

        /*
         * Handles the case where the scanned device is part of an fs that had
         * multiple successful changes of FSID but the currently scanned device
         * didn't observe it, meaning our fsid will be different from theirs.
         * We need to handle two subcases:
         * 1 - The fs still continues to have different METADATA/FSID uuids.
         * 2 - The fs is switched back to its original FSID (METADATA/FSID
         * are equal).
         */
        list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
                /* Changed UUIDs */
                if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
                           BTRFS_FSID_SIZE) != 0 &&
                    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
                           BTRFS_FSID_SIZE) == 0 &&
                    memcmp(fs_devices->fsid, disk_super->fsid,
                           BTRFS_FSID_SIZE) != 0)
                        return fs_devices;

                /* Unchanged UUIDs */
                if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
                           BTRFS_FSID_SIZE) == 0 &&
                    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
                           BTRFS_FSID_SIZE) == 0)
                        return fs_devices;
        }

        return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
                                struct btrfs_super_block *disk_super)
{
        struct btrfs_fs_devices *fs_devices;

        /*
         * Handle the case where the scanned device is part of an fs whose last
         * metadata UUID change reverted it to the original FSID. At the same
         * time fs_devices was first created by another constituent device
         * which didn't fully observe the operation. This results in a
         * btrfs_fs_devices created with metadata/fsid different AND
         * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
         * fs_devices equal to the FSID of the disk.
         */
        list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
                if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
                           BTRFS_FSID_SIZE) != 0 &&
                    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
                           BTRFS_FSID_SIZE) == 0 &&
                    fs_devices->fsid_change)
                        return fs_devices;
        }

        return NULL;
}

/*
 * Add new device to list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
                           struct btrfs_super_block *disk_super,
                           bool *new_device_added)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices = NULL;
        struct rcu_string *name;
        u64 found_transid = btrfs_super_generation(disk_super);
        u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
        dev_t path_devt;
        int error;
        bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
                BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
        bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
                                        BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

        error = lookup_bdev(path, &path_devt);
        if (error)
                return ERR_PTR(error);

        if (fsid_change_in_progress) {
                if (!has_metadata_uuid)
                        fs_devices = find_fsid_inprogress(disk_super);
                else
                        fs_devices = find_fsid_changed(disk_super);
        } else if (has_metadata_uuid) {
                fs_devices = find_fsid_with_metadata_uuid(disk_super);
        } else {
                fs_devices = find_fsid_reverted_metadata(disk_super);
                if (!fs_devices)
                        fs_devices = find_fsid(disk_super->fsid, NULL);
        }

        if (!fs_devices) {
                if (has_metadata_uuid)
                        fs_devices = alloc_fs_devices(disk_super->fsid,
                                                      disk_super->metadata_uuid);
                else
                        fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

                if (IS_ERR(fs_devices))
                        return ERR_CAST(fs_devices);

                fs_devices->fsid_change = fsid_change_in_progress;

                mutex_lock(&fs_devices->device_list_mutex);
                list_add(&fs_devices->fs_list, &fs_uuids);

                device = NULL;
        } else {
                struct btrfs_dev_lookup_args args = {
                        .devid = devid,
                        .uuid = disk_super->dev_item.uuid,
                };

                mutex_lock(&fs_devices->device_list_mutex);
                device = btrfs_find_device(fs_devices, &args);

                /*
                 * If this disk has been pulled into an fs devices created by
                 * a device which had the CHANGING_FSID_V2 flag then replace the
                 * metadata_uuid/fsid values of the fs_devices.
                 */
                if (fs_devices->fsid_change &&
                    found_transid > fs_devices->latest_generation) {
                        memcpy(fs_devices->fsid, disk_super->fsid,
                               BTRFS_FSID_SIZE);

                        if (has_metadata_uuid)
                                memcpy(fs_devices->metadata_uuid,
                                       disk_super->metadata_uuid,
                                       BTRFS_FSID_SIZE);
                        else
                                memcpy(fs_devices->metadata_uuid,
                                       disk_super->fsid, BTRFS_FSID_SIZE);

                        fs_devices->fsid_change = false;
                }
        }

        if (!device) {
                if (fs_devices->opened) {
                        mutex_unlock(&fs_devices->device_list_mutex);
                        return ERR_PTR(-EBUSY);
                }

                device = btrfs_alloc_device(NULL, &devid,
                                            disk_super->dev_item.uuid);
                if (IS_ERR(device)) {
                        mutex_unlock(&fs_devices->device_list_mutex);
                        /* we can safely leave the fs_devices entry around */
                        return device;
                }

                name = rcu_string_strdup(path, GFP_NOFS);
                if (!name) {
                        btrfs_free_device(device);
                        mutex_unlock(&fs_devices->device_list_mutex);
                        return ERR_PTR(-ENOMEM);
                }
                rcu_assign_pointer(device->name, name);
                device->devt = path_devt;

                list_add_rcu(&device->dev_list, &fs_devices->devices);
                fs_devices->num_devices++;

                device->fs_devices = fs_devices;
                *new_device_added = true;

                if (disk_super->label[0])
                        pr_info(
        "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
                                disk_super->label, devid, found_transid, path,
                                current->comm, task_pid_nr(current));
                else
                        pr_info(
        "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
                                disk_super->fsid, devid, found_transid, path,
                                current->comm, task_pid_nr(current));

        } else if (!device->name || strcmp(device->name->str, path)) {
                /*
                 * When FS is already mounted.
                 * 1. If you are here and if the device->name is NULL that
                 *    means this device was missing at time of FS mount.
                 * 2. If you are here and if the device->name is different
                 *    from 'path' that means either
                 *      a. The same device disappeared and reappeared with
                 *         a different name, or
                 *      b. The missing-disk-which-was-replaced has
                 *         reappeared now.
                 *
                 * We must allow 1 and 2a above. But 2b would be spurious
                 * and unintentional.
                 *
                 * Further in case of 1 and 2a above, the disk at 'path'
                 * would have missed some transaction when it was away and
                 * in case of 2a the stale bdev has to be updated as well.
                 * 2b must not be allowed at any time.
                 */

                /*
                 * For now, we do allow update to btrfs_fs_device through the
                 * btrfs dev scan cli after FS has been mounted. We're still
                 * tracking a problem where systems fail mount by subvolume id
                 * when we reject replacement on a mounted FS.
                 */
                if (!fs_devices->opened && found_transid < device->generation) {
                        /*
                         * That is if the FS is _not_ mounted and if you
                         * are here, that means there is more than one
                         * disk with the same uuid and devid. We keep the one
                         * with the larger generation number or the last-in if
                         * generations are equal.
                         */
                        mutex_unlock(&fs_devices->device_list_mutex);
                        return ERR_PTR(-EEXIST);
                }

                /*
                 * We are going to replace the device path for a given devid,
                 * make sure it's the same device if the device is mounted
                 *
                 * NOTE: the device->fs_info may not be reliable here so pass
                 * in a NULL to message helpers instead. This avoids a possible
                 * use-after-free when the fs_info and fs_info->sb are already
                 * torn down.
                 */
                if (device->bdev) {
                        if (device->devt != path_devt) {
                                mutex_unlock(&fs_devices->device_list_mutex);
                                btrfs_warn_in_rcu(NULL,
        "duplicate device %s devid %llu generation %llu scanned by %s (%d)",
                                                  path, devid, found_transid,
                                                  current->comm,
                                                  task_pid_nr(current));
                                return ERR_PTR(-EEXIST);
                        }
                        btrfs_info_in_rcu(NULL,
        "devid %llu device path %s changed to %s scanned by %s (%d)",
                                          devid, rcu_str_deref(device->name),
                                          path, current->comm,
                                          task_pid_nr(current));
                }

                name = rcu_string_strdup(path, GFP_NOFS);
                if (!name) {
                        mutex_unlock(&fs_devices->device_list_mutex);
                        return ERR_PTR(-ENOMEM);
                }
                rcu_string_free(device->name);
                rcu_assign_pointer(device->name, name);
                if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
                        fs_devices->missing_devices--;
                        clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
                }
                device->devt = path_devt;
        }

        /*
         * Unmount does not free the btrfs_device struct but would zero
         * generation along with most of the other members. So just update
         * it back. We need it to pick the disk with the largest generation
         * (as above).
         */
        if (!fs_devices->opened) {
                device->generation = found_transid;
                fs_devices->latest_generation = max_t(u64, found_transid,
                                                fs_devices->latest_generation);
        }

        fs_devices->total_devices = btrfs_super_num_devices(disk_super);

        mutex_unlock(&fs_devices->device_list_mutex);
        return device;
}

static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
        struct btrfs_fs_devices *fs_devices;
        struct btrfs_device *device;
        struct btrfs_device *orig_dev;
        int ret = 0;

        lockdep_assert_held(&uuid_mutex);

        fs_devices = alloc_fs_devices(orig->fsid, NULL);
        if (IS_ERR(fs_devices))
                return fs_devices;

        fs_devices->total_devices = orig->total_devices;

        list_for_each_entry(orig_dev, &orig->devices, dev_list) {
                struct rcu_string *name;

                device = btrfs_alloc_device(NULL, &orig_dev->devid,
                                            orig_dev->uuid);
                if (IS_ERR(device)) {
                        ret = PTR_ERR(device);
                        goto error;
                }

                /*
                 * This is ok to do without rcu read locked because we hold the
                 * uuid mutex so nothing we touch in here is going to disappear.
                 */
                if (orig_dev->name) {
                        name = rcu_string_strdup(orig_dev->name->str,
                                                 GFP_KERNEL);
                        if (!name) {
                                btrfs_free_device(device);
                                ret = -ENOMEM;
                                goto error;
                        }
                        rcu_assign_pointer(device->name, name);
                }

                list_add(&device->dev_list, &fs_devices->devices);
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        }
        return fs_devices;
error:
        free_fs_devices(fs_devices);
        return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
                                      struct btrfs_device **latest_dev)
{
        struct btrfs_device *device, *next;

        /* This is the initialized path, it is safe to release the devices. */
        list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
                if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
                        if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
                                      &device->dev_state) &&
                            !test_bit(BTRFS_DEV_STATE_MISSING,
                                      &device->dev_state) &&
                            (!*latest_dev ||
                             device->generation > (*latest_dev)->generation)) {
                                *latest_dev = device;
                        }
                        continue;
                }

                /*
                 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID
                 * in btrfs_init_dev_replace(), so just continue.
                 */
                if (device->devid == BTRFS_DEV_REPLACE_DEVID)
                        continue;

                if (device->bdev) {
                        blkdev_put(device->bdev, device->mode);
                        device->bdev = NULL;
                        fs_devices->open_devices--;
                }
                if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
                        list_del_init(&device->dev_alloc_list);
                        clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
                        fs_devices->rw_devices--;
                }
                list_del_init(&device->dev_list);
                fs_devices->num_devices--;
                btrfs_free_device(device);
        }
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove the device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *latest_dev = NULL;
        struct btrfs_fs_devices *seed_dev;

        mutex_lock(&uuid_mutex);
        __btrfs_free_extra_devids(fs_devices, &latest_dev);

        list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
                __btrfs_free_extra_devids(seed_dev, &latest_dev);

        fs_devices->latest_dev = latest_dev;

        mutex_unlock(&uuid_mutex);
}

static void btrfs_close_bdev(struct btrfs_device *device)
{
        if (!device->bdev)
                return;

        if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
                sync_blockdev(device->bdev);
                invalidate_bdev(device->bdev);
        }

        blkdev_put(device->bdev, device->mode);
}

static void btrfs_close_one_device(struct btrfs_device *device)
{
        struct btrfs_fs_devices *fs_devices = device->fs_devices;

        if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
            device->devid != BTRFS_DEV_REPLACE_DEVID) {
                list_del_init(&device->dev_alloc_list);
                fs_devices->rw_devices--;
        }

        if (device->devid == BTRFS_DEV_REPLACE_DEVID)
                clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);

        if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
                clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
                fs_devices->missing_devices--;
        }

        btrfs_close_bdev(device);
        if (device->bdev) {
                fs_devices->open_devices--;
                device->bdev = NULL;
        }
        clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
        btrfs_destroy_dev_zone_info(device);

        device->fs_info = NULL;
        atomic_set(&device->dev_stats_ccnt, 0);
        extent_io_tree_release(&device->alloc_state);

        /*
         * Reset the flush error record. We might have a transient flush error
         * in this mount, and if so we aborted the current transaction and set
         * the fs to an error state, guaranteeing no super blocks can be further
         * committed. However that error might be transient and if we unmount the
         * filesystem and mount it again, we should allow the mount to succeed
         * (btrfs_check_rw_degradable() should not fail) - if after mounting the
         * filesystem again we still get flush errors, then we will again abort
         * any transaction and set the error state, guaranteeing no commits of
         * unsafe super blocks.
         */
        device->last_flush_error = 0;

        /* Verify the device is back in a pristine state */
        ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
        ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
        ASSERT(list_empty(&device->dev_alloc_list));
        ASSERT(list_empty(&device->post_commit_list));
}

static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device, *tmp;

        lockdep_assert_held(&uuid_mutex);

        if (--fs_devices->opened > 0)
                return;

        list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
                btrfs_close_one_device(device);

        WARN_ON(fs_devices->open_devices);
        WARN_ON(fs_devices->rw_devices);
        fs_devices->opened = 0;
        fs_devices->seeding = false;
        fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        LIST_HEAD(list);
        struct btrfs_fs_devices *tmp;

        mutex_lock(&uuid_mutex);
        close_fs_devices(fs_devices);
        if (!fs_devices->opened)
                list_splice_init(&fs_devices->seed_list, &list);

        list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
                close_fs_devices(fs_devices);
                list_del(&fs_devices->seed_list);
                free_fs_devices(fs_devices);
        }
        mutex_unlock(&uuid_mutex);
}

static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
                           fmode_t flags, void *holder)
{
        struct btrfs_device *device;
        struct btrfs_device *latest_dev = NULL;
        struct btrfs_device *tmp_device;

        flags |= FMODE_EXCL;

        list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
                                 dev_list) {
                int ret;

                ret = btrfs_open_one_device(fs_devices, device, flags, holder);
                if (ret == 0 &&
                    (!latest_dev || device->generation > latest_dev->generation)) {
                        latest_dev = device;
                } else if (ret == -ENODATA) {
                        fs_devices->num_devices--;
                        list_del(&device->dev_list);
                        btrfs_free_device(device);
                }
        }
        if (fs_devices->open_devices == 0)
                return -EINVAL;

        fs_devices->opened = 1;
        fs_devices->latest_dev = latest_dev;
        fs_devices->total_rw_bytes = 0;
        fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
        fs_devices->read_policy = BTRFS_READ_POLICY_PID;

        return 0;
}

static int devid_cmp(void *priv, const struct list_head *a,
                     const struct list_head *b)
{
        const struct btrfs_device *dev1, *dev2;

        dev1 = list_entry(a, struct btrfs_device, dev_list);
        dev2 = list_entry(b, struct btrfs_device, dev_list);

        if (dev1->devid < dev2->devid)
                return -1;
        else if (dev1->devid > dev2->devid)
                return 1;
        return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                       fmode_t flags, void *holder)
{
        int ret;

        lockdep_assert_held(&uuid_mutex);
        /*
         * The device_list_mutex cannot be taken here in case opening the
         * underlying device takes further locks like open_mutex.
         *
         * We also don't need the lock here as this is called during mount and
         * exclusion is provided by uuid_mutex
         */

        if (fs_devices->opened) {
                fs_devices->opened++;
                ret = 0;
        } else {
                list_sort(NULL, &fs_devices->devices, devid_cmp);
                ret = open_fs_devices(fs_devices, flags, holder);
        }

        return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
        struct page *page = virt_to_page(super);

        put_page(page);
}

static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
                                                       u64 bytenr, u64 bytenr_orig)
{
        struct btrfs_super_block *disk_super;
        struct page *page;
        void *p;
        pgoff_t index;

        /* make sure our super fits in the device */
        if (bytenr + PAGE_SIZE >= bdev_nr_bytes(bdev))
                return ERR_PTR(-EINVAL);

        /* make sure our super fits in the page */
        if (sizeof(*disk_super) > PAGE_SIZE)
                return ERR_PTR(-EINVAL);

        /* make sure our super doesn't straddle pages on disk */
        index = bytenr >> PAGE_SHIFT;
        if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
                return ERR_PTR(-EINVAL);

        /* pull in the page with our super */
        page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

        if (IS_ERR(page))
                return ERR_CAST(page);

        p = page_address(page);

        /* align our pointer to the offset of the super block */
        disk_super = p + offset_in_page(bytenr);

        if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
            btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
                btrfs_release_disk_super(p);
                return ERR_PTR(-EINVAL);
        }

        if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
                disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

        return disk_super;
}

int btrfs_forget_devices(dev_t devt)
{
        int ret;

        mutex_lock(&uuid_mutex);
        ret = btrfs_free_stale_devices(devt, NULL);
        mutex_unlock(&uuid_mutex);

        return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called out of the mount path
 * and we are not allowed to call set_blocksize during the scan. The superblock
 * is read via pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
                                           void *holder)
{
        struct btrfs_super_block *disk_super;
        bool new_device_added = false;
        struct btrfs_device *device = NULL;
        struct block_device *bdev;
        u64 bytenr, bytenr_orig;
        int ret;

        lockdep_assert_held(&uuid_mutex);

        /*
         * we would like to check all the supers, but that would make
         * a btrfs mount succeed after a mkfs from a different FS.
         * So, we need to add a special mount option to scan for
         * later supers, using BTRFS_SUPER_MIRROR_MAX instead
         */
        flags |= FMODE_EXCL;

        bdev = blkdev_get_by_path(path, flags, holder);
        if (IS_ERR(bdev))
                return ERR_CAST(bdev);

        bytenr_orig = btrfs_sb_offset(0);
        ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
        if (ret) {
                device = ERR_PTR(ret);
                goto error_bdev_put;
        }

        disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
        if (IS_ERR(disk_super)) {
                device = ERR_CAST(disk_super);
                goto error_bdev_put;
        }

        device = device_list_add(path, disk_super, &new_device_added);
        if (!IS_ERR(device) && new_device_added)
                btrfs_free_stale_devices(device->devt, device);

        btrfs_release_disk_super(disk_super);

error_bdev_put:
        blkdev_put(bdev, flags);

        return device;
}

/*
 * Try to find a chunk that intersects the [start, start + len] range and when
 * one such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
                                    u64 len)
{
        u64 physical_start, physical_end;

        lockdep_assert_held(&device->fs_info->chunk_mutex);

        if (!find_first_extent_bit(&device->alloc_state, *start,
                                   &physical_start, &physical_end,
                                   CHUNK_ALLOCATED, NULL)) {

                if (in_range(physical_start, *start, len) ||
                    in_range(*start, physical_start,
                             physical_end - physical_start)) {
                        *start = physical_end + 1;
                        return true;
                }
        }
        return false;
}
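
/*
 * Worked example (illustrative): with a chunk recorded at [1MiB, 2MiB) in
 * device->alloc_state, a 512KiB search starting inside that chunk overlaps
 * it, so the helper bumps the search position past the allocated range:
 *
 *      u64 start = SZ_1M + SZ_512K;
 *
 *      if (contains_pending_extent(device, &start, SZ_512K)) {
 *              ... start now points just past the recorded chunk ...
 *      }
 *
 * Callers are expected to retry the search from the updated start.
 */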

static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
        switch (device->fs_devices->chunk_alloc_policy) {
        case BTRFS_CHUNK_ALLOC_REGULAR:
                /*
                 * We don't want to overwrite the superblock on the drive nor
                 * any area used by the boot loader (grub for example), so we
                 * make sure to start at an offset of at least 1MB.
                 */
                return max_t(u64, start, SZ_1M);
        case BTRFS_CHUNK_ALLOC_ZONED:
                /*
                 * We don't care about the starting region like regular
                 * allocator, because we anyway use/reserve the first two zones
                 * for superblock logging.
                 */
                return ALIGN(start, device->zone_info->zone_size);
        default:
                BUG();
        }
}
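
/*
 * Example (illustrative): on a regular device a search starting at offset 0
 * is clamped to the reserved first megabyte, while on a zoned device it is
 * only rounded up to the next zone boundary:
 *
 *      dev_extent_search_start(device, 0)     == SZ_1M   (regular)
 *      dev_extent_search_start(device, SZ_1M) == SZ_256M (zoned, 256MiB zones)
 */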

static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
                                        u64 *hole_start, u64 *hole_size,
                                        u64 num_bytes)
{
        u64 zone_size = device->zone_info->zone_size;
        u64 pos;
        int ret;
        bool changed = false;

        ASSERT(IS_ALIGNED(*hole_start, zone_size));

        while (*hole_size > 0) {
                pos = btrfs_find_allocatable_zones(device, *hole_start,
                                                   *hole_start + *hole_size,
                                                   num_bytes);
                if (pos != *hole_start) {
                        *hole_size = *hole_start + *hole_size - pos;
                        *hole_start = pos;
                        changed = true;
                        if (*hole_size < num_bytes)
                                break;
                }

                ret = btrfs_ensure_empty_zones(device, pos, num_bytes);

                /* Range is ensured to be empty */
                if (!ret)
                        return changed;

                /* Given hole range was invalid (outside of device) */
                if (ret == -ERANGE) {
                        *hole_start += *hole_size;
                        *hole_size = 0;
                        return true;
                }

                *hole_start += zone_size;
                *hole_size -= zone_size;
                changed = true;
        }

        return changed;
}

/**
 * dev_extent_hole_check - check if specified hole is suitable for allocation
 * @device:     the device which we have the hole
 * @hole_start: starting position of the hole
 * @hole_size:  the size of the hole
 * @num_bytes:  the size of the free space that we need
 *
 * This function may modify @hole_start and @hole_size to reflect the suitable
 * position for allocation. Returns true if the hole position is updated,
 * false otherwise.
 */
static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
                                  u64 *hole_size, u64 num_bytes)
{
        bool changed = false;
        u64 hole_end = *hole_start + *hole_size;

        for (;;) {
                /*
                 * Check before we set max_hole_start, otherwise we could end up
                 * sending back this offset anyway.
                 */
                if (contains_pending_extent(device, hole_start, *hole_size)) {
                        if (hole_end >= *hole_start)
                                *hole_size = hole_end - *hole_start;
                        else
                                *hole_size = 0;
                        changed = true;
                }

                switch (device->fs_devices->chunk_alloc_policy) {
                case BTRFS_CHUNK_ALLOC_REGULAR:
                        /* No extra check */
                        break;
                case BTRFS_CHUNK_ALLOC_ZONED:
                        if (dev_extent_hole_check_zoned(device, hole_start,
                                                        hole_size, num_bytes)) {
                                changed = true;
                                /*
                                 * The changed hole can contain pending extent.
                                 * Loop again to check that.
                                 */
                                continue;
                        }
                        break;
                default:
                        BUG();
                }

                break;
        }

        return changed;
}
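
/*
 * Usage sketch (illustrative): a caller walking the device offers each
 * candidate hole to the check above and, when the hole was moved or shrunk,
 * re-evaluates it before using it:
 *
 *      if (dev_extent_hole_check(device, &hole_start, &hole_size, num_bytes))
 *              ... the hole changed and may no longer be large enough ...
 *
 *      if (hole_size >= num_bytes)
 *              ... [hole_start, hole_start + num_bytes) is usable ...
 */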

/*
 * find_free_dev_extent_start - find free space in the specified device
 * @device:       the device which we search the free space in
 * @num_bytes:    the size of the free space that we need
 * @search_start: the position from which to begin the search
 * @start:        store the start of the free space.
 * @len:          the size of the free space that we find, or the size
 *                of the max free space if we don't find suitable free space
 *
 * This uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents.
 *
 * @start is used to store the start of the free space if we find one. But if
 * we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 *
 * NOTE: This function will search *commit* root of device tree, and does extra
 * check to ensure dev extents are not double allocated.
 * This makes the function safe to allocate dev extents but may not report
 * correct usable device space, as device extent freed in current transaction
 * is not reported as available.
 */
static int find_free_dev_extent_start(struct btrfs_device *device,
                                      u64 num_bytes, u64 search_start,
                                      u64 *start, u64 *len)
{
        struct btrfs_fs_info *fs_info = device->fs_info;
        struct btrfs_root *root = fs_info->dev_root;
        struct btrfs_key key;
        struct btrfs_dev_extent *dev_extent;
        struct btrfs_path *path;
        u64 hole_size;
        u64 max_hole_start;
        u64 max_hole_size;
        u64 extent_end;
        u64 search_end = device->total_bytes;
        int ret;
        int slot;
        struct extent_buffer *l;

        search_start = dev_extent_search_start(device, search_start);

        WARN_ON(device->zone_info &&
                !IS_ALIGNED(num_bytes, device->zone_info->zone_size));

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        max_hole_start = search_start;
        max_hole_size = 0;

again:
        if (search_start >= search_end ||
            test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
                ret = -ENOSPC;
                goto out;
        }

        path->reada = READA_FORWARD;
        path->search_commit_root = 1;
        path->skip_locking = 1;

        key.objectid = device->devid;
        key.offset = search_start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_backwards(root, &key, path);
        if (ret < 0)
                goto out;

        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto out;

                        break;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;

                if (key.objectid > device->devid)
                        break;

                if (key.type != BTRFS_DEV_EXTENT_KEY)
                        goto next;

                if (key.offset > search_start) {
                        hole_size = key.offset - search_start;
                        dev_extent_hole_check(device, &search_start, &hole_size,
                                              num_bytes);

                        if (hole_size > max_hole_size) {
                                max_hole_start = search_start;
                                max_hole_size = hole_size;
                        }

                        /*
                         * If this free space is greater than what we need,
                         * it must be the max free space that we have found
                         * until now, so max_hole_start must point to the start
                         * of this free space and the length of this free space
                         * is stored in max_hole_size. Thus, we return
                         * max_hole_start and max_hole_size and go back to the
                         * caller.
                         */
                        if (hole_size >= num_bytes) {
                                ret = 0;
                                goto out;
                        }
                }

                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                extent_end = key.offset + btrfs_dev_extent_length(l,
                                                                  dev_extent);
                if (extent_end > search_start)
                        search_start = extent_end;
next:
                path->slots[0]++;
                cond_resched();
        }

        /*
         * At this point, search_start should be the end of
         * allocated dev extents, and when shrinking the device,
         * search_end may be smaller than search_start.
         */
        if (search_end > search_start) {
                hole_size = search_end - search_start;
                if (dev_extent_hole_check(device, &search_start, &hole_size,
                                          num_bytes)) {
                        btrfs_release_path(path);
                        goto again;
                }

                if (hole_size > max_hole_size) {
                        max_hole_start = search_start;
                        max_hole_size = hole_size;
                }
        }

        /* See above. */
        if (max_hole_size < num_bytes)
                ret = -ENOSPC;
        else
                ret = 0;

out:
        btrfs_free_path(path);
        *start = max_hole_start;
        if (len)
                *len = max_hole_size;
        return ret;
}

int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
                         u64 *start, u64 *len)
{
        /* FIXME use last free of some kind */
        return find_free_dev_extent_start(device, num_bytes, 0, start, len);
}
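
/*
 * Usage sketch (illustrative): chunk allocation asks each candidate device
 * for a hole and, on failure, still learns the largest hole the device
 * could offer:
 *
 *      u64 dev_offset, max_avail;
 *      int ret;
 *
 *      ret = find_free_dev_extent(device, num_bytes, &dev_offset, &max_avail);
 *      if (ret == 0)
 *              ... a dev extent can be allocated at dev_offset ...
 *      else if (ret == -ENOSPC)
 *              ... the device can contribute at most max_avail bytes ...
 */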

static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
                                 struct btrfs_device *device,
                                 u64 start, u64 *dev_extent_len)
{
        struct btrfs_fs_info *fs_info = device->fs_info;
        struct btrfs_root *root = fs_info->dev_root;
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct extent_buffer *leaf = NULL;
        struct btrfs_dev_extent *extent = NULL;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;
again:
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid,
                                          BTRFS_DEV_EXTENT_KEY);
                if (ret)
                        goto out;
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
                BUG_ON(found_key.offset > start || found_key.offset +
                       btrfs_dev_extent_length(leaf, extent) < start);
                key = found_key;
                btrfs_release_path(path);
                goto again;
        } else if (ret == 0) {
                leaf = path->nodes[0];
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
        } else {
                goto out;
        }

        *dev_extent_len = btrfs_dev_extent_length(leaf, extent);

        ret = btrfs_del_item(trans, root, path);
        if (ret == 0)
                set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
out:
        btrfs_free_path(path);
        return ret;
}

static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
        struct extent_map_tree *em_tree;
        struct extent_map *em;
        struct rb_node *n;
        u64 ret = 0;

        em_tree = &fs_info->mapping_tree;
        read_lock(&em_tree->lock);
        n = rb_last(&em_tree->map.rb_root);
        if (n) {
                em = rb_entry(n, struct extent_map, rb_node);
                ret = em->start + em->len;
        }
        read_unlock(&em_tree->lock);

        return ret;
}

static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
                                    u64 *devid_ret)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = (u64)-1;

        ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
        if (ret < 0)
                goto error;

        if (ret == 0) {
                /* Corruption */
                btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
                ret = -EUCLEAN;
                goto error;
        }

        ret = btrfs_previous_item(fs_info->chunk_root, path,
                                  BTRFS_DEV_ITEMS_OBJECTID,
                                  BTRFS_DEV_ITEM_KEY);
        if (ret) {
                *devid_ret = 1;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                *devid_ret = found_key.offset + 1;
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}

/*
 * The device information is stored in the chunk root.
 * The btrfs_device struct should be fully filled in.
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
                              struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_dev_item *dev_item;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        unsigned long ptr;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;

        btrfs_reserve_chunk_metadata(trans, true);
        ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
                                      &key, sizeof(*dev_item));
        btrfs_trans_release_chunk_metadata(trans);
        if (ret)
                goto out;

        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

        btrfs_set_device_id(leaf, dev_item, device->devid);
        btrfs_set_device_generation(leaf, dev_item, 0);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
        btrfs_set_device_total_bytes(leaf, dev_item,
                                     btrfs_device_get_disk_total_bytes(device));
        btrfs_set_device_bytes_used(leaf, dev_item,
                                    btrfs_device_get_bytes_used(device));
        btrfs_set_device_group(leaf, dev_item, 0);
        btrfs_set_device_seek_speed(leaf, dev_item, 0);
        btrfs_set_device_bandwidth(leaf, dev_item, 0);
        btrfs_set_device_start_offset(leaf, dev_item, 0);

        ptr = btrfs_device_uuid(dev_item);
        write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
        ptr = btrfs_device_fsid(dev_item);
        write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
                            ptr, BTRFS_FSID_SIZE);
        btrfs_mark_buffer_dirty(leaf);

        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}
1864
1865 /*
1866 * Function to update ctime/mtime for a given device path.
1867 * Mainly used for ctime/mtime based probe like libblkid.
1868 *
1869 * We don't care about errors here, this is just to be kind to userspace.
1870 */
1871 static void update_dev_time(const char *device_path)
1872 {
1873 struct path path;
1874 struct timespec64 now;
1875 int ret;
1876
1877 ret = kern_path(device_path, LOOKUP_FOLLOW, &path);
1878 if (ret)
1879 return;
1880
1881 now = current_time(d_inode(path.dentry));
1882 inode_update_time(d_inode(path.dentry), &now, S_MTIME | S_CTIME);
1883 path_put(&path);
1884 }
1885
1886 static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
1887 struct btrfs_device *device)
1888 {
1889 struct btrfs_root *root = device->fs_info->chunk_root;
1890 int ret;
1891 struct btrfs_path *path;
1892 struct btrfs_key key;
1893
1894 path = btrfs_alloc_path();
1895 if (!path)
1896 return -ENOMEM;
1897
1898 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1899 key.type = BTRFS_DEV_ITEM_KEY;
1900 key.offset = device->devid;
1901
1902 btrfs_reserve_chunk_metadata(trans, false);
1903 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1904 btrfs_trans_release_chunk_metadata(trans);
1905 if (ret) {
1906 if (ret > 0)
1907 ret = -ENOENT;
1908 goto out;
1909 }
1910
1911 ret = btrfs_del_item(trans, root, path);
1912 out:
1913 btrfs_free_path(path);
1914 return ret;
1915 }
1916
1917 /*
1918 * Verify that @num_devices satisfies the RAID profile constraints in the whole
1919 * filesystem. It's up to the caller to adjust that number regarding eg. device
1920 * replace.
1921 */
1922 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1923 u64 num_devices)
1924 {
1925 u64 all_avail;
1926 unsigned seq;
1927 int i;
1928
1929 do {
1930 seq = read_seqbegin(&fs_info->profiles_lock);
1931
1932 all_avail = fs_info->avail_data_alloc_bits |
1933 fs_info->avail_system_alloc_bits |
1934 fs_info->avail_metadata_alloc_bits;
1935 } while (read_seqretry(&fs_info->profiles_lock, seq));
1936
1937 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1938 if (!(all_avail & btrfs_raid_array[i].bg_flag))
1939 continue;
1940
1941 if (num_devices < btrfs_raid_array[i].devs_min)
1942 return btrfs_raid_array[i].mindev_error;
1943 }
1944
1945 return 0;
1946 }
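/*
 * Illustrative example: with RAID1 metadata (devs_min == 2) present in
 * all_avail, a removal that would leave only one device fails with
 * BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET, since the caller passes
 * num_devices - 1 here when removing a device.
 */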
1947
1948 static struct btrfs_device * btrfs_find_next_active_device(
1949 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1950 {
1951 struct btrfs_device *next_device;
1952
1953 list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1954 if (next_device != device &&
1955 !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
1956 && next_device->bdev)
1957 return next_device;
1958 }
1959
1960 return NULL;
1961 }
1962
1963 /*
1964 * Helper function to check if the given device is part of s_bdev / latest_dev
1965 * and replace it with the provided or the next active device. In the context
1966 * where this function is called, there should always be another device (or
1967 * this_dev) which is active.
1968 */
1969 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
1970 struct btrfs_device *next_device)
1971 {
1972 struct btrfs_fs_info *fs_info = device->fs_info;
1973
1974 if (!next_device)
1975 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
1976 device);
1977 ASSERT(next_device);
1978
1979 if (fs_info->sb->s_bdev &&
1980 (fs_info->sb->s_bdev == device->bdev))
1981 fs_info->sb->s_bdev = next_device->bdev;
1982
1983 if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
1984 fs_info->fs_devices->latest_dev = next_device;
1985 }
1986
1987 /*
1988 * Return btrfs_fs_devices::num_devices excluding the device that's being
1989 * currently replaced.
1990 */
1991 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
1992 {
1993 u64 num_devices = fs_info->fs_devices->num_devices;
1994
1995 down_read(&fs_info->dev_replace.rwsem);
1996 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
1997 ASSERT(num_devices > 1);
1998 num_devices--;
1999 }
2000 up_read(&fs_info->dev_replace.rwsem);
2001
2002 return num_devices;
2003 }
2004
2005 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2006 struct block_device *bdev,
2007 const char *device_path)
2008 {
2009 struct btrfs_super_block *disk_super;
2010 int copy_num;
2011
2012 if (!bdev)
2013 return;
2014
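/*
 * Clear the magic of every superblock copy (the primary and the mirrors
 * at fixed offsets) so the device no longer probes as btrfs. Zoned
 * devices cannot be overwritten in place, so their superblock log zones
 * are reset instead.
 */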
2015 for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2016 struct page *page;
2017 int ret;
2018
2019 disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2020 if (IS_ERR(disk_super))
2021 continue;
2022
2023 if (bdev_is_zoned(bdev)) {
2024 btrfs_reset_sb_log_zones(bdev, copy_num);
2025 continue;
2026 }
2027
2028 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2029
2030 page = virt_to_page(disk_super);
2031 set_page_dirty(page);
2032 lock_page(page);
2033 /* write_one_page() unlocks the page */
2034 ret = write_one_page(page);
2035 if (ret)
2036 btrfs_warn(fs_info,
2037 "error clearing superblock number %d (%d)",
2038 copy_num, ret);
2039 btrfs_release_disk_super(disk_super);
2040
2041 }
2042
2043 /* Notify udev that device has changed */
2044 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2045
2046 /* Update ctime/mtime for device path for libblkid */
2047 update_dev_time(device_path);
2048 }
2049
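/*
 * Device removal in outline: check that the RAID profiles still meet
 * their minimum device counts with one device less, shrink the device
 * to zero so all of its extents are relocated, delete its dev item,
 * drop it from the in-memory lists and wipe its superblocks. The final
 * blkdev_put() is left to the caller (see the comment further below).
 */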
2050 int btrfs_rm_device(struct btrfs_fs_info *fs_info,
2051 struct btrfs_dev_lookup_args *args,
2052 struct block_device **bdev, fmode_t *mode)
2053 {
2054 struct btrfs_trans_handle *trans;
2055 struct btrfs_device *device;
2056 struct btrfs_fs_devices *cur_devices;
2057 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2058 u64 num_devices;
2059 int ret = 0;
2060
2061 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
2062 btrfs_err(fs_info, "device remove not supported on extent tree v2 yet");
2063 return -EINVAL;
2064 }
2065
2066 /*
2067 * The device list in fs_devices is accessed without locks (neither
2068 * uuid_mutex nor device_list_mutex) as it won't change on a mounted
2069 * filesystem and another device rm cannot run.
2070 */
2071 num_devices = btrfs_num_devices(fs_info);
2072
2073 ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2074 if (ret)
2075 return ret;
2076
2077 device = btrfs_find_device(fs_info->fs_devices, args);
2078 if (!device) {
2079 if (args->missing)
2080 ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2081 else
2082 ret = -ENOENT;
2083 return ret;
2084 }
2085
2086 if (btrfs_pinned_by_swapfile(fs_info, device)) {
2087 btrfs_warn_in_rcu(fs_info,
2088 "cannot remove device %s (devid %llu) due to active swapfile",
2089 rcu_str_deref(device->name), device->devid);
2090 return -ETXTBSY;
2091 }
2092
2093 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
2094 return BTRFS_ERROR_DEV_TGT_REPLACE;
2095
2096 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2097 fs_info->fs_devices->rw_devices == 1)
2098 return BTRFS_ERROR_DEV_ONLY_WRITABLE;
2099
2100 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2101 mutex_lock(&fs_info->chunk_mutex);
2102 list_del_init(&device->dev_alloc_list);
2103 device->fs_devices->rw_devices--;
2104 mutex_unlock(&fs_info->chunk_mutex);
2105 }
2106
2107 ret = btrfs_shrink_device(device, 0);
2108 if (ret)
2109 goto error_undo;
2110
2111 trans = btrfs_start_transaction(fs_info->chunk_root, 0);
2112 if (IS_ERR(trans)) {
2113 ret = PTR_ERR(trans);
2114 goto error_undo;
2115 }
2116
2117 ret = btrfs_rm_dev_item(trans, device);
2118 if (ret) {
2119 /* Any error in dev item removal is critical */
2120 btrfs_crit(fs_info,
2121 "failed to remove device item for devid %llu: %d",
2122 device->devid, ret);
2123 btrfs_abort_transaction(trans, ret);
2124 btrfs_end_transaction(trans);
2125 return ret;
2126 }
2127
2128 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2129 btrfs_scrub_cancel_dev(device);
2130
2131 /*
2132 * the device list mutex makes sure that we don't change
2133 * the device list while someone else is writing out all
2134 * the device supers. Whoever is writing all supers, should
2135 * lock the device list mutex before getting the number of
2136 * devices in the super block (super_copy). Conversely,
2137 * whoever updates the number of devices in the super block
2138 * (super_copy) should hold the device list mutex.
2139 */
2140
2141 /*
2142 * In normal cases cur_devices == fs_devices. But when deleting
2143 * a seed device, cur_devices must point to the seed's own
2144 * fs_devices, listed under fs_devices->seed_list.
2145 */
2146 cur_devices = device->fs_devices;
2147 mutex_lock(&fs_devices->device_list_mutex);
2148 list_del_rcu(&device->dev_list);
2149
2150 cur_devices->num_devices--;
2151 cur_devices->total_devices--;
2152 /* Update total_devices of the parent fs_devices if it's seed */
2153 if (cur_devices != fs_devices)
2154 fs_devices->total_devices--;
2155
2156 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2157 cur_devices->missing_devices--;
2158
2159 btrfs_assign_next_active_device(device, NULL);
2160
2161 if (device->bdev) {
2162 cur_devices->open_devices--;
2163 /* remove sysfs entry */
2164 btrfs_sysfs_remove_device(device);
2165 }
2166
2167 num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2168 btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2169 mutex_unlock(&fs_devices->device_list_mutex);
2170
2171 /*
2172 * At this point, the device is zero sized and detached from the
2173 * devices list. All that's left is to zero out the old supers and
2174 * free the device.
2175 *
2176 * We cannot call btrfs_close_bdev() here because we're holding the sb
2177 * write lock, and blkdev_put() will pull in the ->open_mutex on the
2178 * block device and its dependencies. Instead just flush the device
2179 * and let the caller do the final blkdev_put.
2180 */
2181 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2182 btrfs_scratch_superblocks(fs_info, device->bdev,
2183 device->name->str);
2184 if (device->bdev) {
2185 sync_blockdev(device->bdev);
2186 invalidate_bdev(device->bdev);
2187 }
2188 }
2189
2190 *bdev = device->bdev;
2191 *mode = device->mode;
2192 synchronize_rcu();
2193 btrfs_free_device(device);
2194
2195 /*
2196 * This can happen if cur_devices is the private seed devices list. We
2197 * cannot call close_fs_devices() here because it expects the uuid_mutex
2198 * to be held, but in fact we don't need that for the private
2199 * seed_devices, we can simply decrement cur_devices->opened and then
2200 * remove it from our list and free the fs_devices.
2201 */
2202 if (cur_devices->num_devices == 0) {
2203 list_del_init(&cur_devices->seed_list);
2204 ASSERT(cur_devices->opened == 1);
2205 cur_devices->opened--;
2206 free_fs_devices(cur_devices);
2207 }
2208
2209 ret = btrfs_commit_transaction(trans);
2210
2211 return ret;
2212
2213 error_undo:
2214 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2215 mutex_lock(&fs_info->chunk_mutex);
2216 list_add(&device->dev_alloc_list,
2217 &fs_devices->alloc_list);
2218 device->fs_devices->rw_devices++;
2219 mutex_unlock(&fs_info->chunk_mutex);
2220 }
2221 return ret;
2222 }
2223
2224 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2225 {
2226 struct btrfs_fs_devices *fs_devices;
2227
2228 lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2229
2230 /*
2231 * In case of an fs with no seed, srcdev->fs_devices will point
2232 * to the fs_devices of fs_info. However, when the device being
2233 * replaced is a seed device, it will point to the seed's local
2234 * fs_devices. In short, srcdev has the correct fs_devices in both cases.
2235 */
2236 fs_devices = srcdev->fs_devices;
2237
2238 list_del_rcu(&srcdev->dev_list);
2239 list_del(&srcdev->dev_alloc_list);
2240 fs_devices->num_devices--;
2241 if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2242 fs_devices->missing_devices--;
2243
2244 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2245 fs_devices->rw_devices--;
2246
2247 if (srcdev->bdev)
2248 fs_devices->open_devices--;
2249 }
2250
2251 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2252 {
2253 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2254
2255 mutex_lock(&uuid_mutex);
2256
2257 btrfs_close_bdev(srcdev);
2258 synchronize_rcu();
2259 btrfs_free_device(srcdev);
2260
2261 /* If there are no devices left, delete the fs_devices. */
2262 if (!fs_devices->num_devices) {
2263 /*
2264 * On a mounted FS, num_devices can't be zero unless it's a
2265 * seed. In case of a seed device being replaced, the replace
2266 * target is added to the sprout FS, so there will be no device
2267 * left under the seed FS.
2268 */
2269 ASSERT(fs_devices->seeding);
2270
2271 list_del_init(&fs_devices->seed_list);
2272 close_fs_devices(fs_devices);
2273 free_fs_devices(fs_devices);
2274 }
2275 mutex_unlock(&uuid_mutex);
2276 }
2277
2278 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2279 {
2280 struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2281
2282 mutex_lock(&fs_devices->device_list_mutex);
2283
2284 btrfs_sysfs_remove_device(tgtdev);
2285
2286 if (tgtdev->bdev)
2287 fs_devices->open_devices--;
2288
2289 fs_devices->num_devices--;
2290
2291 btrfs_assign_next_active_device(tgtdev, NULL);
2292
2293 list_del_rcu(&tgtdev->dev_list);
2294
2295 mutex_unlock(&fs_devices->device_list_mutex);
2296
2297 btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2298 tgtdev->name->str);
2299
2300 btrfs_close_bdev(tgtdev);
2301 synchronize_rcu();
2302 btrfs_free_device(tgtdev);
2303 }
2304
2305 /**
2306 * btrfs_get_dev_args_from_path - populate args from device at path
2307 *
2308 * @fs_info: the filesystem
2309 * @args: the args to populate
2310 * @path: the path to the device
2311 *
2312 * This will read the super block of the device at @path and populate @args with
2313 * the devid, fsid, and uuid. This is meant to be used for ioctls that need to
2314 * lookup a device to operate on, but need to do it before we take any locks.
2315 * This properly handles the special case of "missing" that a user may pass in,
2316 * and does some basic sanity checks. The caller must make sure that @path is
2317 * properly NUL terminated before calling in, and must call
2318 * btrfs_put_dev_args_from_path() in order to free up the temporary fsid and
2319 * uuid buffers.
2320 *
2321 * Return: 0 for success, -errno for failure
2322 */
2323 int btrfs_get_dev_args_from_path(struct btrfs_fs_info *fs_info,
2324 struct btrfs_dev_lookup_args *args,
2325 const char *path)
2326 {
2327 struct btrfs_super_block *disk_super;
2328 struct block_device *bdev;
2329 int ret;
2330
2331 if (!path || !path[0])
2332 return -EINVAL;
2333 if (!strcmp(path, "missing")) {
2334 args->missing = true;
2335 return 0;
2336 }
2337
2338 args->uuid = kzalloc(BTRFS_UUID_SIZE, GFP_KERNEL);
2339 args->fsid = kzalloc(BTRFS_FSID_SIZE, GFP_KERNEL);
2340 if (!args->uuid || !args->fsid) {
2341 btrfs_put_dev_args_from_path(args);
2342 return -ENOMEM;
2343 }
2344
2345 ret = btrfs_get_bdev_and_sb(path, FMODE_READ, fs_info->bdev_holder, 0,
2346 &bdev, &disk_super);
2347 if (ret) {
2348 btrfs_put_dev_args_from_path(args);
2349 return ret;
2350 }
2351
2352 args->devid = btrfs_stack_device_id(&disk_super->dev_item);
2353 memcpy(args->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE);
2354 if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2355 memcpy(args->fsid, disk_super->metadata_uuid, BTRFS_FSID_SIZE);
2356 else
2357 memcpy(args->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
2358 btrfs_release_disk_super(disk_super);
2359 blkdev_put(bdev, FMODE_READ);
2360 return 0;
2361 }
2362
2363 /*
2364 * Only use this jointly with btrfs_get_dev_args_from_path() because we will
2365 * allocate our ->uuid and ->fsid pointers, everybody else uses local variables
2366 * that don't need to be freed.
2367 */
2368 void btrfs_put_dev_args_from_path(struct btrfs_dev_lookup_args *args)
2369 {
2370 kfree(args->uuid);
2371 kfree(args->fsid);
2372 args->uuid = NULL;
2373 args->fsid = NULL;
2374 }
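/*
 * Typical usage of the pair above (a sketch, mirroring
 * btrfs_find_device_by_devspec() below):
 *
 *	BTRFS_DEV_LOOKUP_ARGS(args);
 *
 *	ret = btrfs_get_dev_args_from_path(fs_info, &args, path);
 *	if (ret)
 *		return ERR_PTR(ret);
 *	device = btrfs_find_device(fs_info->fs_devices, &args);
 *	btrfs_put_dev_args_from_path(&args);
 */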
2375
2376 struct btrfs_device *btrfs_find_device_by_devspec(
2377 struct btrfs_fs_info *fs_info, u64 devid,
2378 const char *device_path)
2379 {
2380 BTRFS_DEV_LOOKUP_ARGS(args);
2381 struct btrfs_device *device;
2382 int ret;
2383
2384 if (devid) {
2385 args.devid = devid;
2386 device = btrfs_find_device(fs_info->fs_devices, &args);
2387 if (!device)
2388 return ERR_PTR(-ENOENT);
2389 return device;
2390 }
2391
2392 ret = btrfs_get_dev_args_from_path(fs_info, &args, device_path);
2393 if (ret)
2394 return ERR_PTR(ret);
2395 device = btrfs_find_device(fs_info->fs_devices, &args);
2396 btrfs_put_dev_args_from_path(&args);
2397 if (!device)
2398 return ERR_PTR(-ENOENT);
2399 return device;
2400 }
2401
2402 static struct btrfs_fs_devices *btrfs_init_sprout(struct btrfs_fs_info *fs_info)
2403 {
2404 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2405 struct btrfs_fs_devices *old_devices;
2406 struct btrfs_fs_devices *seed_devices;
2407
2408 lockdep_assert_held(&uuid_mutex);
2409 if (!fs_devices->seeding)
2410 return ERR_PTR(-EINVAL);
2411
2412 /*
2413 * Private copy of the seed devices, anchored at
2414 * fs_info->fs_devices->seed_list
2415 */
2416 seed_devices = alloc_fs_devices(NULL, NULL);
2417 if (IS_ERR(seed_devices))
2418 return seed_devices;
2419
2420 /*
2421 * It's necessary to retain a copy of the original seed fs_devices in
2422 * fs_uuids so that filesystems which have been seeded can successfully
2423 * reference the seed device from open_seed_devices. This also supports
2424 * multiple seed filesystems.
2425 */
2426 old_devices = clone_fs_devices(fs_devices);
2427 if (IS_ERR(old_devices)) {
2428 kfree(seed_devices);
2429 return old_devices;
2430 }
2431
2432 list_add(&old_devices->fs_list, &fs_uuids);
2433
2434 memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2435 seed_devices->opened = 1;
2436 INIT_LIST_HEAD(&seed_devices->devices);
2437 INIT_LIST_HEAD(&seed_devices->alloc_list);
2438 mutex_init(&seed_devices->device_list_mutex);
2439
2440 return seed_devices;
2441 }
2442
2443 /*
2444 * Splice seed devices into the sprout fs_devices.
2445 * Generate a new fsid for the sprouted read-write filesystem.
2446 */
2447 static void btrfs_setup_sprout(struct btrfs_fs_info *fs_info,
2448 struct btrfs_fs_devices *seed_devices)
2449 {
2450 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2451 struct btrfs_super_block *disk_super = fs_info->super_copy;
2452 struct btrfs_device *device;
2453 u64 super_flags;
2454
2455 /*
2456 * We are updating the fsid, the thread leading to device_list_add()
2457 * could race, so uuid_mutex is needed.
2458 */
2459 lockdep_assert_held(&uuid_mutex);
2460
2461 /*
2462 * The threads listed below may traverse dev_list but can do that without
2463 * device_list_mutex:
2464 * - All device ops and balance - as we are in btrfs_exclop_start.
2465 * - Various dev_list readers - are using RCU.
2466 * - btrfs_ioctl_fitrim() - is using RCU.
2467 *
2468 * The following read-only threads use device_list_mutex:
2469 * - Readonly scrub btrfs_scrub_dev()
2470 * - Readonly scrub btrfs_scrub_progress()
2471 * - btrfs_get_dev_stats()
2472 */
2473 lockdep_assert_held(&fs_devices->device_list_mutex);
2474
2475 list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2476 synchronize_rcu);
2477 list_for_each_entry(device, &seed_devices->devices, dev_list)
2478 device->fs_devices = seed_devices;
2479
2480 fs_devices->seeding = false;
2481 fs_devices->num_devices = 0;
2482 fs_devices->open_devices = 0;
2483 fs_devices->missing_devices = 0;
2484 fs_devices->rotating = false;
2485 list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2486
2487 generate_random_uuid(fs_devices->fsid);
2488 memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2489 memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2490
2491 super_flags = btrfs_super_flags(disk_super) &
2492 ~BTRFS_SUPER_FLAG_SEEDING;
2493 btrfs_set_super_flags(disk_super, super_flags);
2494 }
2495
2496 /*
2497 * Store the expected generation for seed devices in device items.
2498 */
2499 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2500 {
2501 BTRFS_DEV_LOOKUP_ARGS(args);
2502 struct btrfs_fs_info *fs_info = trans->fs_info;
2503 struct btrfs_root *root = fs_info->chunk_root;
2504 struct btrfs_path *path;
2505 struct extent_buffer *leaf;
2506 struct btrfs_dev_item *dev_item;
2507 struct btrfs_device *device;
2508 struct btrfs_key key;
2509 u8 fs_uuid[BTRFS_FSID_SIZE];
2510 u8 dev_uuid[BTRFS_UUID_SIZE];
2511 int ret;
2512
2513 path = btrfs_alloc_path();
2514 if (!path)
2515 return -ENOMEM;
2516
2517 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2518 key.offset = 0;
2519 key.type = BTRFS_DEV_ITEM_KEY;
2520
2521 while (1) {
2522 btrfs_reserve_chunk_metadata(trans, false);
2523 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2524 btrfs_trans_release_chunk_metadata(trans);
2525 if (ret < 0)
2526 goto error;
2527
2528 leaf = path->nodes[0];
2529 next_slot:
2530 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2531 ret = btrfs_next_leaf(root, path);
2532 if (ret > 0)
2533 break;
2534 if (ret < 0)
2535 goto error;
2536 leaf = path->nodes[0];
2537 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2538 btrfs_release_path(path);
2539 continue;
2540 }
2541
2542 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2543 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2544 key.type != BTRFS_DEV_ITEM_KEY)
2545 break;
2546
2547 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2548 struct btrfs_dev_item);
2549 args.devid = btrfs_device_id(leaf, dev_item);
2550 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2551 BTRFS_UUID_SIZE);
2552 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2553 BTRFS_FSID_SIZE);
2554 args.uuid = dev_uuid;
2555 args.fsid = fs_uuid;
2556 device = btrfs_find_device(fs_info->fs_devices, &args);
2557 BUG_ON(!device); /* Logic error */
2558
2559 if (device->fs_devices->seeding) {
2560 btrfs_set_device_generation(leaf, dev_item,
2561 device->generation);
2562 btrfs_mark_buffer_dirty(leaf);
2563 }
2564
2565 path->slots[0]++;
2566 goto next_slot;
2567 }
2568 ret = 0;
2569 error:
2570 btrfs_free_path(path);
2571 return ret;
2572 }
2573
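/*
 * Add a new device to a mounted filesystem. If the filesystem is a seed,
 * this also sprouts a new writable filesystem: the seed devices are
 * spliced onto fs_devices->seed_list and a fresh fsid is generated
 * before the new device is inserted.
 */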
2574 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2575 {
2576 struct btrfs_root *root = fs_info->dev_root;
2577 struct btrfs_trans_handle *trans;
2578 struct btrfs_device *device;
2579 struct block_device *bdev;
2580 struct super_block *sb = fs_info->sb;
2581 struct rcu_string *name;
2582 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2583 struct btrfs_fs_devices *seed_devices;
2584 u64 orig_super_total_bytes;
2585 u64 orig_super_num_devices;
2586 int ret = 0;
2587 bool seeding_dev = false;
2588 bool locked = false;
2589
2590 if (sb_rdonly(sb) && !fs_devices->seeding)
2591 return -EROFS;
2592
2593 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2594 fs_info->bdev_holder);
2595 if (IS_ERR(bdev))
2596 return PTR_ERR(bdev);
2597
2598 if (!btrfs_check_device_zone_type(fs_info, bdev)) {
2599 ret = -EINVAL;
2600 goto error;
2601 }
2602
2603 if (fs_devices->seeding) {
2604 seeding_dev = true;
2605 down_write(&sb->s_umount);
2606 mutex_lock(&uuid_mutex);
2607 locked = true;
2608 }
2609
2610 sync_blockdev(bdev);
2611
2612 rcu_read_lock();
2613 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2614 if (device->bdev == bdev) {
2615 ret = -EEXIST;
2616 rcu_read_unlock();
2617 goto error;
2618 }
2619 }
2620 rcu_read_unlock();
2621
2622 device = btrfs_alloc_device(fs_info, NULL, NULL);
2623 if (IS_ERR(device)) {
2624 /* we can safely leave the fs_devices entry around */
2625 ret = PTR_ERR(device);
2626 goto error;
2627 }
2628
2629 name = rcu_string_strdup(device_path, GFP_KERNEL);
2630 if (!name) {
2631 ret = -ENOMEM;
2632 goto error_free_device;
2633 }
2634 rcu_assign_pointer(device->name, name);
2635
2636 device->fs_info = fs_info;
2637 device->bdev = bdev;
2638 ret = lookup_bdev(device_path, &device->devt);
2639 if (ret)
2640 goto error_free_device;
2641
2642 ret = btrfs_get_dev_zone_info(device, false);
2643 if (ret)
2644 goto error_free_device;
2645
2646 trans = btrfs_start_transaction(root, 0);
2647 if (IS_ERR(trans)) {
2648 ret = PTR_ERR(trans);
2649 goto error_free_zone;
2650 }
2651
2652 set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2653 device->generation = trans->transid;
2654 device->io_width = fs_info->sectorsize;
2655 device->io_align = fs_info->sectorsize;
2656 device->sector_size = fs_info->sectorsize;
2657 device->total_bytes =
2658 round_down(bdev_nr_bytes(bdev), fs_info->sectorsize);
2659 device->disk_total_bytes = device->total_bytes;
2660 device->commit_total_bytes = device->total_bytes;
2661 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2662 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2663 device->mode = FMODE_EXCL;
2664 device->dev_stats_valid = 1;
2665 set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2666
2667 if (seeding_dev) {
2668 btrfs_clear_sb_rdonly(sb);
2669
2670 /* GFP_KERNEL allocation must not be under device_list_mutex */
2671 seed_devices = btrfs_init_sprout(fs_info);
2672 if (IS_ERR(seed_devices)) {
2673 ret = PTR_ERR(seed_devices);
2674 btrfs_abort_transaction(trans, ret);
2675 goto error_trans;
2676 }
2677 }
2678
2679 mutex_lock(&fs_devices->device_list_mutex);
2680 if (seeding_dev) {
2681 btrfs_setup_sprout(fs_info, seed_devices);
2682 btrfs_assign_next_active_device(fs_info->fs_devices->latest_dev,
2683 device);
2684 }
2685
2686 device->fs_devices = fs_devices;
2687
2688 mutex_lock(&fs_info->chunk_mutex);
2689 list_add_rcu(&device->dev_list, &fs_devices->devices);
2690 list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2691 fs_devices->num_devices++;
2692 fs_devices->open_devices++;
2693 fs_devices->rw_devices++;
2694 fs_devices->total_devices++;
2695 fs_devices->total_rw_bytes += device->total_bytes;
2696
2697 atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2698
2699 if (!bdev_nonrot(bdev))
2700 fs_devices->rotating = true;
2701
2702 orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2703 btrfs_set_super_total_bytes(fs_info->super_copy,
2704 round_down(orig_super_total_bytes + device->total_bytes,
2705 fs_info->sectorsize));
2706
2707 orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2708 btrfs_set_super_num_devices(fs_info->super_copy,
2709 orig_super_num_devices + 1);
2710
2711 /*
2712 * we've got more storage, clear any full flags on the space
2713 * infos
2714 */
2715 btrfs_clear_space_info_full(fs_info);
2716
2717 mutex_unlock(&fs_info->chunk_mutex);
2718
2719 /* Add sysfs device entry */
2720 btrfs_sysfs_add_device(device);
2721
2722 mutex_unlock(&fs_devices->device_list_mutex);
2723
2724 if (seeding_dev) {
2725 mutex_lock(&fs_info->chunk_mutex);
2726 ret = init_first_rw_device(trans);
2727 mutex_unlock(&fs_info->chunk_mutex);
2728 if (ret) {
2729 btrfs_abort_transaction(trans, ret);
2730 goto error_sysfs;
2731 }
2732 }
2733
2734 ret = btrfs_add_dev_item(trans, device);
2735 if (ret) {
2736 btrfs_abort_transaction(trans, ret);
2737 goto error_sysfs;
2738 }
2739
2740 if (seeding_dev) {
2741 ret = btrfs_finish_sprout(trans);
2742 if (ret) {
2743 btrfs_abort_transaction(trans, ret);
2744 goto error_sysfs;
2745 }
2746
2747 /*
2748 * fs_devices now represents the newly sprouted filesystem and
2749 * its fsid has been changed by btrfs_setup_sprout().
2750 */
2751 btrfs_sysfs_update_sprout_fsid(fs_devices);
2752 }
2753
2754 ret = btrfs_commit_transaction(trans);
2755
2756 if (seeding_dev) {
2757 mutex_unlock(&uuid_mutex);
2758 up_write(&sb->s_umount);
2759 locked = false;
2760
2761 if (ret) /* transaction commit */
2762 return ret;
2763
2764 ret = btrfs_relocate_sys_chunks(fs_info);
2765 if (ret < 0)
2766 btrfs_handle_fs_error(fs_info, ret,
2767 "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2768 trans = btrfs_attach_transaction(root);
2769 if (IS_ERR(trans)) {
2770 if (PTR_ERR(trans) == -ENOENT)
2771 return 0;
2772 ret = PTR_ERR(trans);
2773 trans = NULL;
2774 goto error_sysfs;
2775 }
2776 ret = btrfs_commit_transaction(trans);
2777 }
2778
2779 /*
2780 * Now that we have written a new super block to this device, check all
2781 * other fs_devices lists in case device_path was previously registered
2782 * under another scanned filesystem, and forget it there if so.
2783 * We can ignore the return value as it typically returns -EINVAL and
2784 * only succeeds if the device was an alien.
2785 */
2786 btrfs_forget_devices(device->devt);
2787
2788 /* Update ctime/mtime for blkid or udev */
2789 update_dev_time(device_path);
2790
2791 return ret;
2792
2793 error_sysfs:
2794 btrfs_sysfs_remove_device(device);
2795 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2796 mutex_lock(&fs_info->chunk_mutex);
2797 list_del_rcu(&device->dev_list);
2798 list_del(&device->dev_alloc_list);
2799 fs_info->fs_devices->num_devices--;
2800 fs_info->fs_devices->open_devices--;
2801 fs_info->fs_devices->rw_devices--;
2802 fs_info->fs_devices->total_devices--;
2803 fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2804 atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2805 btrfs_set_super_total_bytes(fs_info->super_copy,
2806 orig_super_total_bytes);
2807 btrfs_set_super_num_devices(fs_info->super_copy,
2808 orig_super_num_devices);
2809 mutex_unlock(&fs_info->chunk_mutex);
2810 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2811 error_trans:
2812 if (seeding_dev)
2813 btrfs_set_sb_rdonly(sb);
2814 if (trans)
2815 btrfs_end_transaction(trans);
2816 error_free_zone:
2817 btrfs_destroy_dev_zone_info(device);
2818 error_free_device:
2819 btrfs_free_device(device);
2820 error:
2821 blkdev_put(bdev, FMODE_EXCL);
2822 if (locked) {
2823 mutex_unlock(&uuid_mutex);
2824 up_write(&sb->s_umount);
2825 }
2826 return ret;
2827 }
2828
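/* Write the in-memory device sizes back into its dev item in the chunk tree. */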
2829 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2830 struct btrfs_device *device)
2831 {
2832 int ret;
2833 struct btrfs_path *path;
2834 struct btrfs_root *root = device->fs_info->chunk_root;
2835 struct btrfs_dev_item *dev_item;
2836 struct extent_buffer *leaf;
2837 struct btrfs_key key;
2838
2839 path = btrfs_alloc_path();
2840 if (!path)
2841 return -ENOMEM;
2842
2843 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2844 key.type = BTRFS_DEV_ITEM_KEY;
2845 key.offset = device->devid;
2846
2847 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2848 if (ret < 0)
2849 goto out;
2850
2851 if (ret > 0) {
2852 ret = -ENOENT;
2853 goto out;
2854 }
2855
2856 leaf = path->nodes[0];
2857 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2858
2859 btrfs_set_device_id(leaf, dev_item, device->devid);
2860 btrfs_set_device_type(leaf, dev_item, device->type);
2861 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2862 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2863 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2864 btrfs_set_device_total_bytes(leaf, dev_item,
2865 btrfs_device_get_disk_total_bytes(device));
2866 btrfs_set_device_bytes_used(leaf, dev_item,
2867 btrfs_device_get_bytes_used(device));
2868 btrfs_mark_buffer_dirty(leaf);
2869
2870 out:
2871 btrfs_free_path(path);
2872 return ret;
2873 }
2874
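/*
 * Grow a writable device to new_size (rounded down to the sector size),
 * updating both the superblock total and the dev item in the chunk tree.
 */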
2875 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2876 struct btrfs_device *device, u64 new_size)
2877 {
2878 struct btrfs_fs_info *fs_info = device->fs_info;
2879 struct btrfs_super_block *super_copy = fs_info->super_copy;
2880 u64 old_total;
2881 u64 diff;
2882 int ret;
2883
2884 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2885 return -EACCES;
2886
2887 new_size = round_down(new_size, fs_info->sectorsize);
2888
2889 mutex_lock(&fs_info->chunk_mutex);
2890 old_total = btrfs_super_total_bytes(super_copy);
2891 diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2892
2893 if (new_size <= device->total_bytes ||
2894 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2895 mutex_unlock(&fs_info->chunk_mutex);
2896 return -EINVAL;
2897 }
2898
2899 btrfs_set_super_total_bytes(super_copy,
2900 round_down(old_total + diff, fs_info->sectorsize));
2901 device->fs_devices->total_rw_bytes += diff;
2902
2903 btrfs_device_set_total_bytes(device, new_size);
2904 btrfs_device_set_disk_total_bytes(device, new_size);
2905 btrfs_clear_space_info_full(device->fs_info);
2906 if (list_empty(&device->post_commit_list))
2907 list_add_tail(&device->post_commit_list,
2908 &trans->transaction->dev_update_list);
2909 mutex_unlock(&fs_info->chunk_mutex);
2910
2911 btrfs_reserve_chunk_metadata(trans, false);
2912 ret = btrfs_update_device(trans, device);
2913 btrfs_trans_release_chunk_metadata(trans);
2914
2915 return ret;
2916 }
2917
2918 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2919 {
2920 struct btrfs_fs_info *fs_info = trans->fs_info;
2921 struct btrfs_root *root = fs_info->chunk_root;
2922 int ret;
2923 struct btrfs_path *path;
2924 struct btrfs_key key;
2925
2926 path = btrfs_alloc_path();
2927 if (!path)
2928 return -ENOMEM;
2929
2930 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2931 key.offset = chunk_offset;
2932 key.type = BTRFS_CHUNK_ITEM_KEY;
2933
2934 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2935 if (ret < 0)
2936 goto out;
2937 else if (ret > 0) { /* Logic error or corruption */
2938 btrfs_handle_fs_error(fs_info, -ENOENT,
2939 "Failed lookup while freeing chunk.");
2940 ret = -ENOENT;
2941 goto out;
2942 }
2943
2944 ret = btrfs_del_item(trans, root, path);
2945 if (ret < 0)
2946 btrfs_handle_fs_error(fs_info, ret,
2947 "Failed to delete chunk item.");
2948 out:
2949 btrfs_free_path(path);
2950 return ret;
2951 }
2952
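/*
 * The superblock's sys_chunk_array is a packed sequence of
 * (struct btrfs_disk_key, struct btrfs_chunk + stripes) pairs. Deleting
 * an entry moves the tail of the array over it and shrinks
 * sys_array_size, leaving no holes behind.
 */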
2953 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2954 {
2955 struct btrfs_super_block *super_copy = fs_info->super_copy;
2956 struct btrfs_disk_key *disk_key;
2957 struct btrfs_chunk *chunk;
2958 u8 *ptr;
2959 int ret = 0;
2960 u32 num_stripes;
2961 u32 array_size;
2962 u32 len = 0;
2963 u32 cur;
2964 struct btrfs_key key;
2965
2966 lockdep_assert_held(&fs_info->chunk_mutex);
2967 array_size = btrfs_super_sys_array_size(super_copy);
2968
2969 ptr = super_copy->sys_chunk_array;
2970 cur = 0;
2971
2972 while (cur < array_size) {
2973 disk_key = (struct btrfs_disk_key *)ptr;
2974 btrfs_disk_key_to_cpu(&key, disk_key);
2975
2976 len = sizeof(*disk_key);
2977
2978 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2979 chunk = (struct btrfs_chunk *)(ptr + len);
2980 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2981 len += btrfs_chunk_item_size(num_stripes);
2982 } else {
2983 ret = -EIO;
2984 break;
2985 }
2986 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2987 key.offset == chunk_offset) {
2988 memmove(ptr, ptr + len, array_size - (cur + len));
2989 array_size -= len;
2990 btrfs_set_super_sys_array_size(super_copy, array_size);
2991 } else {
2992 ptr += len;
2993 cur += len;
2994 }
2995 }
2996 return ret;
2997 }
2998
2999 /*
3000 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
3001 * @logical: Logical block offset in bytes.
3002 * @length: Length of extent in bytes.
3003 *
3004 * Return: Chunk mapping or ERR_PTR.
3005 */
3006 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
3007 u64 logical, u64 length)
3008 {
3009 struct extent_map_tree *em_tree;
3010 struct extent_map *em;
3011
3012 em_tree = &fs_info->mapping_tree;
3013 read_lock(&em_tree->lock);
3014 em = lookup_extent_mapping(em_tree, logical, length);
3015 read_unlock(&em_tree->lock);
3016
3017 if (!em) {
3018 btrfs_crit(fs_info, "unable to find logical %llu length %llu",
3019 logical, length);
3020 return ERR_PTR(-EINVAL);
3021 }
3022
3023 if (em->start > logical || em->start + em->len < logical) {
3024 btrfs_crit(fs_info,
3025 "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
3026 logical, length, em->start, em->start + em->len);
3027 free_extent_map(em);
3028 return ERR_PTR(-EINVAL);
3029 }
3030
3031 /* callers are responsible for dropping em's ref. */
3032 return em;
3033 }
3034
3035 static int remove_chunk_item(struct btrfs_trans_handle *trans,
3036 struct map_lookup *map, u64 chunk_offset)
3037 {
3038 int i;
3039
3040 /*
3041 * Removing chunk items and updating the device items in the chunks btree
3042 * requires holding the chunk_mutex.
3043 * See the comment at btrfs_chunk_alloc() for the details.
3044 */
3045 lockdep_assert_held(&trans->fs_info->chunk_mutex);
3046
3047 for (i = 0; i < map->num_stripes; i++) {
3048 int ret;
3049
3050 ret = btrfs_update_device(trans, map->stripes[i].dev);
3051 if (ret)
3052 return ret;
3053 }
3054
3055 return btrfs_free_chunk(trans, chunk_offset);
3056 }
3057
3058 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
3059 {
3060 struct btrfs_fs_info *fs_info = trans->fs_info;
3061 struct extent_map *em;
3062 struct map_lookup *map;
3063 u64 dev_extent_len = 0;
3064 int i, ret = 0;
3065 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3066
3067 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
3068 if (IS_ERR(em)) {
3069 /*
3070 * This is a logic error, but we don't want to just rely on the
3071 * user having built with ASSERT enabled, so if ASSERT doesn't
3072 * do anything we still error out.
3073 */
3074 ASSERT(0);
3075 return PTR_ERR(em);
3076 }
3077 map = em->map_lookup;
3078
3079 /*
3080 * First delete the device extent items from the devices btree.
3081 * We take the device_list_mutex to avoid racing with the finishing phase
3082 * of a device replace operation. See the comment below before acquiring
3083 * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
3084 * because that can result in a deadlock when deleting the device extent
3085 * items from the devices btree - COWing an extent buffer from the btree
3086 * may result in allocating a new metadata chunk, which would attempt to
3087 * lock fs_info->chunk_mutex again.
3088 */
3089 mutex_lock(&fs_devices->device_list_mutex);
3090 for (i = 0; i < map->num_stripes; i++) {
3091 struct btrfs_device *device = map->stripes[i].dev;
3092 ret = btrfs_free_dev_extent(trans, device,
3093 map->stripes[i].physical,
3094 &dev_extent_len);
3095 if (ret) {
3096 mutex_unlock(&fs_devices->device_list_mutex);
3097 btrfs_abort_transaction(trans, ret);
3098 goto out;
3099 }
3100
3101 if (device->bytes_used > 0) {
3102 mutex_lock(&fs_info->chunk_mutex);
3103 btrfs_device_set_bytes_used(device,
3104 device->bytes_used - dev_extent_len);
3105 atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3106 btrfs_clear_space_info_full(fs_info);
3107 mutex_unlock(&fs_info->chunk_mutex);
3108 }
3109 }
3110 mutex_unlock(&fs_devices->device_list_mutex);
3111
3112 /*
3113 * We acquire fs_info->chunk_mutex for 2 reasons:
3114 *
3115 * 1) Just like with the first phase of the chunk allocation, we must
3116 * reserve system space, do all chunk btree updates and deletions, and
3117 * update the system chunk array in the superblock while holding this
3118 * mutex. This is for similar reasons as explained on the comment at
3119 * the top of btrfs_chunk_alloc();
3120 *
3121 * 2) Prevent races with the final phase of a device replace operation
3122 * that replaces the device object associated with the map's stripes,
3123 * because the device object's id can change at any time during that
3124 * final phase of the device replace operation
3125 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
3126 * replaced device and then see it with an ID of
3127 * BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
3128 * the device item, which does not exist in the chunk btree.
3129 * The finishing phase of device replace acquires both the
3130 * device_list_mutex and the chunk_mutex, in that order, so we are
3131 * safe by just acquiring the chunk_mutex.
3132 */
3133 trans->removing_chunk = true;
3134 mutex_lock(&fs_info->chunk_mutex);
3135
3136 check_system_chunk(trans, map->type);
3137
3138 ret = remove_chunk_item(trans, map, chunk_offset);
3139 /*
3140 * Normally we should not get -ENOSPC since we reserved space before
3141 * through the call to check_system_chunk().
3142 *
3143 * Despite our system space_info having enough free space, we may not
3144 * be able to allocate extents from its block groups, because all have
3145 * an incompatible profile, which will force us to allocate a new system
3146 * block group with the right profile, or right after we called
3147 * check_system_chunk() above, a scrub turned the only system block group
3148 * with enough free space into RO mode.
3149 * This is explained with more detail at do_chunk_alloc().
3150 *
3151 * So if we get -ENOSPC, allocate a new system chunk and retry once.
3152 */
3153 if (ret == -ENOSPC) {
3154 const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
3155 struct btrfs_block_group *sys_bg;
3156
3157 sys_bg = btrfs_create_chunk(trans, sys_flags);
3158 if (IS_ERR(sys_bg)) {
3159 ret = PTR_ERR(sys_bg);
3160 btrfs_abort_transaction(trans, ret);
3161 goto out;
3162 }
3163
3164 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
3165 if (ret) {
3166 btrfs_abort_transaction(trans, ret);
3167 goto out;
3168 }
3169
3170 ret = remove_chunk_item(trans, map, chunk_offset);
3171 if (ret) {
3172 btrfs_abort_transaction(trans, ret);
3173 goto out;
3174 }
3175 } else if (ret) {
3176 btrfs_abort_transaction(trans, ret);
3177 goto out;
3178 }
3179
3180 trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
3181
3182 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3183 ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3184 if (ret) {
3185 btrfs_abort_transaction(trans, ret);
3186 goto out;
3187 }
3188 }
3189
3190 mutex_unlock(&fs_info->chunk_mutex);
3191 trans->removing_chunk = false;
3192
3193 /*
3194 * We are done with chunk btree updates and deletions, so release the
3195 * system space we previously reserved (with check_system_chunk()).
3196 */
3197 btrfs_trans_release_chunk_metadata(trans);
3198
3199 ret = btrfs_remove_block_group(trans, chunk_offset, em);
3200 if (ret) {
3201 btrfs_abort_transaction(trans, ret);
3202 goto out;
3203 }
3204
3205 out:
3206 if (trans->removing_chunk) {
3207 mutex_unlock(&fs_info->chunk_mutex);
3208 trans->removing_chunk = false;
3209 }
3210 /* once for us */
3211 free_extent_map(em);
3212 return ret;
3213 }
3214
3215 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3216 {
3217 struct btrfs_root *root = fs_info->chunk_root;
3218 struct btrfs_trans_handle *trans;
3219 struct btrfs_block_group *block_group;
3220 u64 length;
3221 int ret;
3222
3223 if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
3224 btrfs_err(fs_info,
3225 "relocate: not supported on extent tree v2 yet");
3226 return -EINVAL;
3227 }
3228
3229 /*
3230 * Prevent races with automatic removal of unused block groups.
3231 * After we relocate and before we remove the chunk with offset
3232 * chunk_offset, automatic removal of the block group can kick in,
3233 * resulting in a failure when calling btrfs_remove_chunk() below.
3234 *
3235 * Make sure to acquire this mutex before doing a tree search (dev
3236 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3237 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3238 * we release the path used to search the chunk/dev tree and before
3239 * the current task acquires this mutex and calls us.
3240 */
3241 lockdep_assert_held(&fs_info->reclaim_bgs_lock);
3242
3243 /* step one, relocate all the extents inside this chunk */
3244 btrfs_scrub_pause(fs_info);
3245 ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3246 btrfs_scrub_continue(fs_info);
3247 if (ret)
3248 return ret;
3249
3250 block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3251 if (!block_group)
3252 return -ENOENT;
3253 btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3254 length = block_group->length;
3255 btrfs_put_block_group(block_group);
3256
3257 /*
3258 * On a zoned file system, discard the whole block group, this will
3259 * trigger a REQ_OP_ZONE_RESET operation on the device zone. If
3260 * resetting the zone fails, don't treat it as a fatal problem from the
3261 * filesystem's point of view.
3262 */
3263 if (btrfs_is_zoned(fs_info)) {
3264 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL);
3265 if (ret)
3266 btrfs_info(fs_info,
3267 "failed to reset zone %llu after relocation",
3268 chunk_offset);
3269 }
3270
3271 trans = btrfs_start_trans_remove_block_group(root->fs_info,
3272 chunk_offset);
3273 if (IS_ERR(trans)) {
3274 ret = PTR_ERR(trans);
3275 btrfs_handle_fs_error(root->fs_info, ret, NULL);
3276 return ret;
3277 }
3278
3279 /*
3280 * step two, delete the device extents and the
3281 * chunk tree entries
3282 */
3283 ret = btrfs_remove_chunk(trans, chunk_offset);
3284 btrfs_end_transaction(trans);
3285 return ret;
3286 }
3287
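/*
 * Walk the chunk tree backwards from the highest offset and relocate
 * every SYSTEM chunk. Chunks failing with -ENOSPC are retried in one
 * extra pass, since relocating the other chunks may have freed space.
 */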
3288 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3289 {
3290 struct btrfs_root *chunk_root = fs_info->chunk_root;
3291 struct btrfs_path *path;
3292 struct extent_buffer *leaf;
3293 struct btrfs_chunk *chunk;
3294 struct btrfs_key key;
3295 struct btrfs_key found_key;
3296 u64 chunk_type;
3297 bool retried = false;
3298 int failed = 0;
3299 int ret;
3300
3301 path = btrfs_alloc_path();
3302 if (!path)
3303 return -ENOMEM;
3304
3305 again:
3306 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3307 key.offset = (u64)-1;
3308 key.type = BTRFS_CHUNK_ITEM_KEY;
3309
3310 while (1) {
3311 mutex_lock(&fs_info->reclaim_bgs_lock);
3312 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3313 if (ret < 0) {
3314 mutex_unlock(&fs_info->reclaim_bgs_lock);
3315 goto error;
3316 }
3317 BUG_ON(ret == 0); /* Corruption */
3318
3319 ret = btrfs_previous_item(chunk_root, path, key.objectid,
3320 key.type);
3321 if (ret)
3322 mutex_unlock(&fs_info->reclaim_bgs_lock);
3323 if (ret < 0)
3324 goto error;
3325 if (ret > 0)
3326 break;
3327
3328 leaf = path->nodes[0];
3329 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3330
3331 chunk = btrfs_item_ptr(leaf, path->slots[0],
3332 struct btrfs_chunk);
3333 chunk_type = btrfs_chunk_type(leaf, chunk);
3334 btrfs_release_path(path);
3335
3336 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3337 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3338 if (ret == -ENOSPC)
3339 failed++;
3340 else
3341 BUG_ON(ret);
3342 }
3343 mutex_unlock(&fs_info->reclaim_bgs_lock);
3344
3345 if (found_key.offset == 0)
3346 break;
3347 key.offset = found_key.offset - 1;
3348 }
3349 ret = 0;
3350 if (failed && !retried) {
3351 failed = 0;
3352 retried = true;
3353 goto again;
3354 } else if (WARN_ON(failed && retried)) {
3355 ret = -ENOSPC;
3356 }
3357 error:
3358 btrfs_free_path(path);
3359 return ret;
3360 }
3361
3362 /*
3363 * Return 1 : a data chunk was allocated successfully,
3364 * return <0: an error occurred while allocating a data chunk,
3365 * return 0 : no need to allocate a data chunk.
3366 */
3367 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3368 u64 chunk_offset)
3369 {
3370 struct btrfs_block_group *cache;
3371 u64 bytes_used;
3372 u64 chunk_type;
3373
3374 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3375 ASSERT(cache);
3376 chunk_type = cache->flags;
3377 btrfs_put_block_group(cache);
3378
3379 if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3380 return 0;
3381
3382 spin_lock(&fs_info->data_sinfo->lock);
3383 bytes_used = fs_info->data_sinfo->bytes_used;
3384 spin_unlock(&fs_info->data_sinfo->lock);
3385
3386 if (!bytes_used) {
3387 struct btrfs_trans_handle *trans;
3388 int ret;
3389
3390 trans = btrfs_join_transaction(fs_info->tree_root);
3391 if (IS_ERR(trans))
3392 return PTR_ERR(trans);
3393
3394 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3395 btrfs_end_transaction(trans);
3396 if (ret < 0)
3397 return ret;
3398 return 1;
3399 }
3400
3401 return 0;
3402 }
3403
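/*
 * The balance state is persisted in the tree root under the key
 * (BTRFS_BALANCE_OBJECTID, BTRFS_TEMPORARY_ITEM_KEY, 0) so that an
 * interrupted balance can be resumed on the next mount.
 */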
3404 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3405 struct btrfs_balance_control *bctl)
3406 {
3407 struct btrfs_root *root = fs_info->tree_root;
3408 struct btrfs_trans_handle *trans;
3409 struct btrfs_balance_item *item;
3410 struct btrfs_disk_balance_args disk_bargs;
3411 struct btrfs_path *path;
3412 struct extent_buffer *leaf;
3413 struct btrfs_key key;
3414 int ret, err;
3415
3416 path = btrfs_alloc_path();
3417 if (!path)
3418 return -ENOMEM;
3419
3420 trans = btrfs_start_transaction(root, 0);
3421 if (IS_ERR(trans)) {
3422 btrfs_free_path(path);
3423 return PTR_ERR(trans);
3424 }
3425
3426 key.objectid = BTRFS_BALANCE_OBJECTID;
3427 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3428 key.offset = 0;
3429
3430 ret = btrfs_insert_empty_item(trans, root, path, &key,
3431 sizeof(*item));
3432 if (ret)
3433 goto out;
3434
3435 leaf = path->nodes[0];
3436 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3437
3438 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3439
3440 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3441 btrfs_set_balance_data(leaf, item, &disk_bargs);
3442 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3443 btrfs_set_balance_meta(leaf, item, &disk_bargs);
3444 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3445 btrfs_set_balance_sys(leaf, item, &disk_bargs);
3446
3447 btrfs_set_balance_flags(leaf, item, bctl->flags);
3448
3449 btrfs_mark_buffer_dirty(leaf);
3450 out:
3451 btrfs_free_path(path);
3452 err = btrfs_commit_transaction(trans);
3453 if (err && !ret)
3454 ret = err;
3455 return ret;
3456 }
3457
3458 static int del_balance_item(struct btrfs_fs_info *fs_info)
3459 {
3460 struct btrfs_root *root = fs_info->tree_root;
3461 struct btrfs_trans_handle *trans;
3462 struct btrfs_path *path;
3463 struct btrfs_key key;
3464 int ret, err;
3465
3466 path = btrfs_alloc_path();
3467 if (!path)
3468 return -ENOMEM;
3469
3470 trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3471 if (IS_ERR(trans)) {
3472 btrfs_free_path(path);
3473 return PTR_ERR(trans);
3474 }
3475
3476 key.objectid = BTRFS_BALANCE_OBJECTID;
3477 key.type = BTRFS_TEMPORARY_ITEM_KEY;
3478 key.offset = 0;
3479
3480 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3481 if (ret < 0)
3482 goto out;
3483 if (ret > 0) {
3484 ret = -ENOENT;
3485 goto out;
3486 }
3487
3488 ret = btrfs_del_item(trans, root, path);
3489 out:
3490 btrfs_free_path(path);
3491 err = btrfs_commit_transaction(trans);
3492 if (err && !ret)
3493 ret = err;
3494 return ret;
3495 }
3496
3497 /*
3498 * This is a heuristic used to reduce the number of chunks balanced on
3499 * resume after balance was interrupted.
3500 */
3501 static void update_balance_args(struct btrfs_balance_control *bctl)
3502 {
3503 /*
3504 * Turn on soft mode for chunk types that were being converted.
3505 */
3506 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3507 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3508 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3509 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3510 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3511 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3512
3513 /*
3514 * Turn on the usage filter if it is not already in use. The idea is
3515 * that chunks that we have already balanced should be
3516 * reasonably full. Don't do it for chunks that are being
3517 * converted - that will keep us from relocating unconverted
3518 * (albeit full) chunks.
3519 */
3520 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3521 !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3522 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3523 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3524 bctl->data.usage = 90;
3525 }
3526 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3527 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3528 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3529 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3530 bctl->sys.usage = 90;
3531 }
3532 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3533 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3534 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3535 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3536 bctl->meta.usage = 90;
3537 }
3538 }
3539
3540 /*
3541 * Clear the balance status in fs_info and delete the balance item from disk.
3542 */
3543 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3544 {
3545 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3546 int ret;
3547
3548 BUG_ON(!fs_info->balance_ctl);
3549
3550 spin_lock(&fs_info->balance_lock);
3551 fs_info->balance_ctl = NULL;
3552 spin_unlock(&fs_info->balance_lock);
3553
3554 kfree(bctl);
3555 ret = del_balance_item(fs_info);
3556 if (ret)
3557 btrfs_handle_fs_error(fs_info, ret, NULL);
3558 }
3559
3560 /*
3561 * Balance filters. Return 1 if chunk should be filtered out
3562 * (should not be balanced).
3563 */
3564 static int chunk_profiles_filter(u64 chunk_type,
3565 struct btrfs_balance_args *bargs)
3566 {
3567 chunk_type = chunk_to_extended(chunk_type) &
3568 BTRFS_EXTENDED_PROFILE_MASK;
3569
3570 if (bargs->profiles & chunk_type)
3571 return 0;
3572
3573 return 1;
3574 }
3575
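/*
 * For both usage filters below the thresholds are percentages of the
 * block group length; e.g. usage_min=50 with usage_max=60 relocates only
 * chunks whose used bytes fall in [50%, 60%) of their length, and the
 * single-value usage=N form relocates chunks that are less than N% full.
 */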
3576 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3577 struct btrfs_balance_args *bargs)
3578 {
3579 struct btrfs_block_group *cache;
3580 u64 chunk_used;
3581 u64 user_thresh_min;
3582 u64 user_thresh_max;
3583 int ret = 1;
3584
3585 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3586 chunk_used = cache->used;
3587
3588 if (bargs->usage_min == 0)
3589 user_thresh_min = 0;
3590 else
3591 user_thresh_min = div_factor_fine(cache->length,
3592 bargs->usage_min);
3593
3594 if (bargs->usage_max == 0)
3595 user_thresh_max = 1;
3596 else if (bargs->usage_max > 100)
3597 user_thresh_max = cache->length;
3598 else
3599 user_thresh_max = div_factor_fine(cache->length,
3600 bargs->usage_max);
3601
3602 if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3603 ret = 0;
3604
3605 btrfs_put_block_group(cache);
3606 return ret;
3607 }
3608
3609 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3610 u64 chunk_offset, struct btrfs_balance_args *bargs)
3611 {
3612 struct btrfs_block_group *cache;
3613 u64 chunk_used, user_thresh;
3614 int ret = 1;
3615
3616 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3617 chunk_used = cache->used;
3618
3619 if (bargs->usage_min == 0)
3620 user_thresh = 1;
3621 else if (bargs->usage > 100)
3622 user_thresh = cache->length;
3623 else
3624 user_thresh = div_factor_fine(cache->length, bargs->usage);
3625
3626 if (chunk_used < user_thresh)
3627 ret = 0;
3628
3629 btrfs_put_block_group(cache);
3630 return ret;
3631 }
3632
3633 static int chunk_devid_filter(struct extent_buffer *leaf,
3634 struct btrfs_chunk *chunk,
3635 struct btrfs_balance_args *bargs)
3636 {
3637 struct btrfs_stripe *stripe;
3638 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3639 int i;
3640
3641 for (i = 0; i < num_stripes; i++) {
3642 stripe = btrfs_stripe_nr(chunk, i);
3643 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3644 return 0;
3645 }
3646
3647 return 1;
3648 }
3649
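/*
 * For example, with the btrfs_raid_array values above: a 4-stripe RAID10
 * chunk (ncopies=2, nparity=0) holds (4 - 0) / 2 = 2 data stripes, and a
 * 4-stripe RAID5 chunk (ncopies=1, nparity=1) holds (4 - 1) / 1 = 3.
 */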
3650 static u64 calc_data_stripes(u64 type, int num_stripes)
3651 {
3652 const int index = btrfs_bg_flags_to_raid_index(type);
3653 const int ncopies = btrfs_raid_array[index].ncopies;
3654 const int nparity = btrfs_raid_array[index].nparity;
3655
3656 return (num_stripes - nparity) / ncopies;
3657 }
3658
3659 /* [pstart, pend) */
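/*
 * A chunk passes when any of its stripes on the given device overlaps the
 * half-open range: [stripe_offset, stripe_offset + stripe_length) and
 * [pstart, pend) intersect iff stripe_offset < pend and
 * stripe_offset + stripe_length > pstart. The per-device stripe length is
 * the chunk length divided by the number of data stripes.
 */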
3660 static int chunk_drange_filter(struct extent_buffer *leaf,
3661 struct btrfs_chunk *chunk,
3662 struct btrfs_balance_args *bargs)
3663 {
3664 struct btrfs_stripe *stripe;
3665 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3666 u64 stripe_offset;
3667 u64 stripe_length;
3668 u64 type;
3669 int factor;
3670 int i;
3671
3672 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3673 return 0;
3674
3675 type = btrfs_chunk_type(leaf, chunk);
3676 factor = calc_data_stripes(type, num_stripes);
3677
3678 for (i = 0; i < num_stripes; i++) {
3679 stripe = btrfs_stripe_nr(chunk, i);
3680 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3681 continue;
3682
3683 stripe_offset = btrfs_stripe_offset(leaf, stripe);
3684 stripe_length = btrfs_chunk_length(leaf, chunk);
3685 stripe_length = div_u64(stripe_length, factor);
3686
3687 if (stripe_offset < bargs->pend &&
3688 stripe_offset + stripe_length > bargs->pstart)
3689 return 0;
3690 }
3691
3692 return 1;
3693 }
3694
3695 /* [vstart, vend) */
3696 static int chunk_vrange_filter(struct extent_buffer *leaf,
3697 struct btrfs_chunk *chunk,
3698 u64 chunk_offset,
3699 struct btrfs_balance_args *bargs)
3700 {
3701 if (chunk_offset < bargs->vend &&
3702 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3703 /* at least part of the chunk is inside this vrange */
3704 return 0;
3705
3706 return 1;
3707 }
3708
3709 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3710 struct btrfs_chunk *chunk,
3711 struct btrfs_balance_args *bargs)
3712 {
3713 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3714
3715 if (bargs->stripes_min <= num_stripes
3716 && num_stripes <= bargs->stripes_max)
3717 return 0;
3718
3719 return 1;
3720 }
3721
3722 static int chunk_soft_convert_filter(u64 chunk_type,
3723 struct btrfs_balance_args *bargs)
3724 {
3725 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3726 return 0;
3727
3728 chunk_type = chunk_to_extended(chunk_type) &
3729 BTRFS_EXTENDED_PROFILE_MASK;
3730
3731 if (bargs->target == chunk_type)
3732 return 1;
3733
3734 return 0;
3735 }
3736
3737 static int should_balance_chunk(struct extent_buffer *leaf,
3738 struct btrfs_chunk *chunk, u64 chunk_offset)
3739 {
3740 struct btrfs_fs_info *fs_info = leaf->fs_info;
3741 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3742 struct btrfs_balance_args *bargs = NULL;
3743 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3744
3745 /* type filter */
3746 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3747 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3748 return 0;
3749 }
3750
3751 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3752 bargs = &bctl->data;
3753 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3754 bargs = &bctl->sys;
3755 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3756 bargs = &bctl->meta;
3757
3758 /* profiles filter */
3759 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3760 chunk_profiles_filter(chunk_type, bargs)) {
3761 return 0;
3762 }
3763
3764 /* usage filter */
3765 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3766 chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3767 return 0;
3768 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3769 chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3770 return 0;
3771 }
3772
3773 /* devid filter */
3774 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3775 chunk_devid_filter(leaf, chunk, bargs)) {
3776 return 0;
3777 }
3778
3779 /* drange filter, makes sense only with devid filter */
3780 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3781 chunk_drange_filter(leaf, chunk, bargs)) {
3782 return 0;
3783 }
3784
3785 /* vrange filter */
3786 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3787 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3788 return 0;
3789 }
3790
3791 /* stripes filter */
3792 if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3793 chunk_stripes_range_filter(leaf, chunk, bargs)) {
3794 return 0;
3795 }
3796
3797 /* soft profile changing mode */
3798 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3799 chunk_soft_convert_filter(chunk_type, bargs)) {
3800 return 0;
3801 }
3802
3803 /*
3804 * limited by count, must be the last filter
3805 */
3806 if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3807 if (bargs->limit == 0)
3808 return 0;
3809 else
3810 bargs->limit--;
3811 } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3812 /*
3813 * Same logic as the 'limit' filter; the minimum cannot be
3814 * determined here because we do not have the global information
3815 * about the count of all chunks that satisfy the filters.
3816 */
3817 if (bargs->limit_max == 0)
3818 return 0;
3819 else
3820 bargs->limit_max--;
3821 }
3822
3823 return 1;
3824 }
3825
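/*
 * Balancing is done in two passes over the chunk tree, both walking
 * backwards from the highest chunk offset. The first (counting) pass only
 * tallies how many chunks match the filters, so that bctl->stat.expected
 * is known up front; the second pass does the actual relocation. Because
 * should_balance_chunk() consumes the limit counters, the saved limits
 * are restored between the passes.
 */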
3826 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3827 {
3828 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3829 struct btrfs_root *chunk_root = fs_info->chunk_root;
3830 u64 chunk_type;
3831 struct btrfs_chunk *chunk;
3832 struct btrfs_path *path = NULL;
3833 struct btrfs_key key;
3834 struct btrfs_key found_key;
3835 struct extent_buffer *leaf;
3836 int slot;
3837 int ret;
3838 int enospc_errors = 0;
3839 bool counting = true;
3840 /* The single value limit and min/max limits use the same bytes in the balance args union */
3841 u64 limit_data = bctl->data.limit;
3842 u64 limit_meta = bctl->meta.limit;
3843 u64 limit_sys = bctl->sys.limit;
3844 u32 count_data = 0;
3845 u32 count_meta = 0;
3846 u32 count_sys = 0;
3847 int chunk_reserved = 0;
3848
3849 path = btrfs_alloc_path();
3850 if (!path) {
3851 ret = -ENOMEM;
3852 goto error;
3853 }
3854
3855 /* zero out stat counters */
3856 spin_lock(&fs_info->balance_lock);
3857 memset(&bctl->stat, 0, sizeof(bctl->stat));
3858 spin_unlock(&fs_info->balance_lock);
3859 again:
3860 if (!counting) {
3861 /*
3862 * The single value limit and min/max limits use the same bytes in
3863 * the balance args union; restore what the counting pass consumed.
3864 */
3865 bctl->data.limit = limit_data;
3866 bctl->meta.limit = limit_meta;
3867 bctl->sys.limit = limit_sys;
3868 }
3869 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3870 key.offset = (u64)-1;
3871 key.type = BTRFS_CHUNK_ITEM_KEY;
3872
3873 while (1) {
3874 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3875 atomic_read(&fs_info->balance_cancel_req)) {
3876 ret = -ECANCELED;
3877 goto error;
3878 }
3879
3880 mutex_lock(&fs_info->reclaim_bgs_lock);
3881 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3882 if (ret < 0) {
3883 mutex_unlock(&fs_info->reclaim_bgs_lock);
3884 goto error;
3885 }
3886
3887 /*
3888 * this shouldn't happen, it means the last relocate
3889 * failed
3890 */
3891 if (ret == 0)
3892 BUG(); /* FIXME break ? */
3893
3894 ret = btrfs_previous_item(chunk_root, path, 0,
3895 BTRFS_CHUNK_ITEM_KEY);
3896 if (ret) {
3897 mutex_unlock(&fs_info->reclaim_bgs_lock);
3898 ret = 0;
3899 break;
3900 }
3901
3902 leaf = path->nodes[0];
3903 slot = path->slots[0];
3904 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3905
3906 if (found_key.objectid != key.objectid) {
3907 mutex_unlock(&fs_info->reclaim_bgs_lock);
3908 break;
3909 }
3910
3911 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3912 chunk_type = btrfs_chunk_type(leaf, chunk);
3913
3914 if (!counting) {
3915 spin_lock(&fs_info->balance_lock);
3916 bctl->stat.considered++;
3917 spin_unlock(&fs_info->balance_lock);
3918 }
3919
3920 ret = should_balance_chunk(leaf, chunk, found_key.offset);
3921
3922 btrfs_release_path(path);
3923 if (!ret) {
3924 mutex_unlock(&fs_info->reclaim_bgs_lock);
3925 goto loop;
3926 }
3927
3928 if (counting) {
3929 mutex_unlock(&fs_info->reclaim_bgs_lock);
3930 spin_lock(&fs_info->balance_lock);
3931 bctl->stat.expected++;
3932 spin_unlock(&fs_info->balance_lock);
3933
3934 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3935 count_data++;
3936 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3937 count_sys++;
3938 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3939 count_meta++;
3940
3941 goto loop;
3942 }
3943
3944 /*
3945 * Apply limit_min filter, no need to check if the LIMITS
3946 * filter is used, limit_min is 0 by default
3947 */
3948 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3949 count_data < bctl->data.limit_min)
3950 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3951 count_meta < bctl->meta.limit_min)
3952 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3953 count_sys < bctl->sys.limit_min)) {
3954 mutex_unlock(&fs_info->reclaim_bgs_lock);
3955 goto loop;
3956 }
3957
3958 if (!chunk_reserved) {
3959 /*
3960 * We may be relocating the only data chunk we have,
3961 * which could potentially end up losing the data
3962 * raid profile, so let's allocate an empty one in
3963 * advance.
3964 */
3965 ret = btrfs_may_alloc_data_chunk(fs_info,
3966 found_key.offset);
3967 if (ret < 0) {
3968 mutex_unlock(&fs_info->reclaim_bgs_lock);
3969 goto error;
3970 } else if (ret == 1) {
3971 chunk_reserved = 1;
3972 }
3973 }
3974
3975 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3976 mutex_unlock(&fs_info->reclaim_bgs_lock);
3977 if (ret == -ENOSPC) {
3978 enospc_errors++;
3979 } else if (ret == -ETXTBSY) {
3980 btrfs_info(fs_info,
3981 "skipping relocation of block group %llu due to active swapfile",
3982 found_key.offset);
3983 ret = 0;
3984 } else if (ret) {
3985 goto error;
3986 } else {
3987 spin_lock(&fs_info->balance_lock);
3988 bctl->stat.completed++;
3989 spin_unlock(&fs_info->balance_lock);
3990 }
3991 loop:
3992 if (found_key.offset == 0)
3993 break;
3994 key.offset = found_key.offset - 1;
3995 }
3996
3997 if (counting) {
3998 btrfs_release_path(path);
3999 counting = false;
4000 goto again;
4001 }
4002 error:
4003 btrfs_free_path(path);
4004 if (enospc_errors) {
4005 btrfs_info(fs_info, "%d enospc errors during balance",
4006 enospc_errors);
4007 if (!ret)
4008 ret = -ENOSPC;
4009 }
4010
4011 return ret;
4012 }
4013
4014 /**
4015 * alloc_profile_is_valid - see if a given profile is valid and reduced
4016 * @flags: profile to validate
4017 * @extended: if true @flags is treated as an extended profile
4018 */
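/*
 * E.g. BTRFS_BLOCK_GROUP_RAID1 alone is valid either way; 0 is valid only
 * for usual profiles (it means SINGLE on disk), while an extended profile
 * must carry exactly one bit, such as BTRFS_AVAIL_ALLOC_BIT_SINGLE; a
 * combination like RAID0|RAID1 is rejected as not reduced.
 */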
4019 static int alloc_profile_is_valid(u64 flags, int extended)
4020 {
4021 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
4022 BTRFS_BLOCK_GROUP_PROFILE_MASK);
4023
4024 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
4025
4026 /* 1) check that all other bits are zeroed */
4027 if (flags & ~mask)
4028 return 0;
4029
4030 /* 2) see if profile is reduced */
4031 if (flags == 0)
4032 return !extended; /* "0" is valid for usual profiles */
4033
4034 return has_single_bit_set(flags);
4035 }
4036
4037 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
4038 {
4039 /* cancel requested || normal exit path */
4040 return atomic_read(&fs_info->balance_cancel_req) ||
4041 (atomic_read(&fs_info->balance_pause_req) == 0 &&
4042 atomic_read(&fs_info->balance_cancel_req) == 0);
4043 }
4044
4045 /*
4046 * Validate target profile against allowed profiles and return true if it's OK.
4047 * Otherwise print the error message and return false.
4048 */
4049 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
4050 const struct btrfs_balance_args *bargs,
4051 u64 allowed, const char *type)
4052 {
4053 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
4054 return true;
4055
4056 /* Profile is valid and does not have bits outside of the allowed set */
4057 if (alloc_profile_is_valid(bargs->target, 1) &&
4058 (bargs->target & ~allowed) == 0)
4059 return true;
4060
4061 btrfs_err(fs_info, "balance: invalid convert %s profile %s",
4062 type, btrfs_bg_type_to_raid_name(bargs->target));
4063 return false;
4064 }
4065
4066 /*
4067 * Fill @buf with textual description of balance filter flags @bargs, up to
4068 * @size_buf including the terminating null. The output may be trimmed if it
4069 * does not fit into the provided buffer.
4070 */
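/*
 * E.g. flags with CONVERT, SOFT and USAGE set might produce
 * "convert=raid1,soft,usage=90"; the trailing comma added by the last
 * filter is removed at out_overflow.
 */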
4071 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
4072 u32 size_buf)
4073 {
4074 int ret;
4075 u32 size_bp = size_buf;
4076 char *bp = buf;
4077 u64 flags = bargs->flags;
4078 char tmp_buf[128] = {'\0'};
4079
4080 if (!flags)
4081 return;
4082
4083 #define CHECK_APPEND_NOARG(a) \
4084 do { \
4085 ret = snprintf(bp, size_bp, (a)); \
4086 if (ret < 0 || ret >= size_bp) \
4087 goto out_overflow; \
4088 size_bp -= ret; \
4089 bp += ret; \
4090 } while (0)
4091
4092 #define CHECK_APPEND_1ARG(a, v1) \
4093 do { \
4094 ret = snprintf(bp, size_bp, (a), (v1)); \
4095 if (ret < 0 || ret >= size_bp) \
4096 goto out_overflow; \
4097 size_bp -= ret; \
4098 bp += ret; \
4099 } while (0)
4100
4101 #define CHECK_APPEND_2ARG(a, v1, v2) \
4102 do { \
4103 ret = snprintf(bp, size_bp, (a), (v1), (v2)); \
4104 if (ret < 0 || ret >= size_bp) \
4105 goto out_overflow; \
4106 size_bp -= ret; \
4107 bp += ret; \
4108 } while (0)
4109
4110 if (flags & BTRFS_BALANCE_ARGS_CONVERT)
4111 CHECK_APPEND_1ARG("convert=%s,",
4112 btrfs_bg_type_to_raid_name(bargs->target));
4113
4114 if (flags & BTRFS_BALANCE_ARGS_SOFT)
4115 CHECK_APPEND_NOARG("soft,");
4116
4117 if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
4118 btrfs_describe_block_groups(bargs->profiles, tmp_buf,
4119 sizeof(tmp_buf));
4120 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
4121 }
4122
4123 if (flags & BTRFS_BALANCE_ARGS_USAGE)
4124 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
4125
4126 if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
4127 CHECK_APPEND_2ARG("usage=%u..%u,",
4128 bargs->usage_min, bargs->usage_max);
4129
4130 if (flags & BTRFS_BALANCE_ARGS_DEVID)
4131 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
4132
4133 if (flags & BTRFS_BALANCE_ARGS_DRANGE)
4134 CHECK_APPEND_2ARG("drange=%llu..%llu,",
4135 bargs->pstart, bargs->pend);
4136
4137 if (flags & BTRFS_BALANCE_ARGS_VRANGE)
4138 CHECK_APPEND_2ARG("vrange=%llu..%llu,",
4139 bargs->vstart, bargs->vend);
4140
4141 if (flags & BTRFS_BALANCE_ARGS_LIMIT)
4142 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
4143
4144 if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
4145 CHECK_APPEND_2ARG("limit=%u..%u,",
4146 bargs->limit_min, bargs->limit_max);
4147
4148 if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
4149 CHECK_APPEND_2ARG("stripes=%u..%u,",
4150 bargs->stripes_min, bargs->stripes_max);
4151
4152 #undef CHECK_APPEND_2ARG
4153 #undef CHECK_APPEND_1ARG
4154 #undef CHECK_APPEND_NOARG
4155
4156 out_overflow:
4157
4158 if (size_bp < size_buf)
4159 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
4160 else
4161 buf[0] = '\0';
4162 }
4163
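/*
 * Builds and logs a command-line style description of the running
 * balance, e.g. "balance: resume -dusage=90 -musage=90 -susage=90" for a
 * balance resumed with the arguments set by update_balance_args() above.
 */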
4164 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
4165 {
4166 u32 size_buf = 1024;
4167 char tmp_buf[192] = {'\0'};
4168 char *buf;
4169 char *bp;
4170 u32 size_bp = size_buf;
4171 int ret;
4172 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4173
4174 buf = kzalloc(size_buf, GFP_KERNEL);
4175 if (!buf)
4176 return;
4177
4178 bp = buf;
4179
4180 #define CHECK_APPEND_1ARG(a, v1) \
4181 do { \
4182 ret = snprintf(bp, size_bp, (a), (v1)); \
4183 if (ret < 0 || ret >= size_bp) \
4184 goto out_overflow; \
4185 size_bp -= ret; \
4186 bp += ret; \
4187 } while (0)
4188
4189 if (bctl->flags & BTRFS_BALANCE_FORCE)
4190 CHECK_APPEND_1ARG("%s", "-f ");
4191
4192 if (bctl->flags & BTRFS_BALANCE_DATA) {
4193 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
4194 CHECK_APPEND_1ARG("-d%s ", tmp_buf);
4195 }
4196
4197 if (bctl->flags & BTRFS_BALANCE_METADATA) {
4198 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4199 CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4200 }
4201
4202 if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4203 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4204 CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4205 }
4206
4207 #undef CHECK_APPEND_1ARG
4208
4209 out_overflow:
4210
4211 if (size_bp < size_buf)
4212 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4213 btrfs_info(fs_info, "balance: %s %s",
4214 (bctl->flags & BTRFS_BALANCE_RESUME) ?
4215 "resume" : "start", buf);
4216
4217 kfree(buf);
4218 }
4219
4220 /*
4221 * Should be called with the balance mutex held
4222 */
4223 int btrfs_balance(struct btrfs_fs_info *fs_info,
4224 struct btrfs_balance_control *bctl,
4225 struct btrfs_ioctl_balance_args *bargs)
4226 {
4227 u64 meta_target, data_target;
4228 u64 allowed;
4229 int mixed = 0;
4230 int ret;
4231 u64 num_devices;
4232 unsigned seq;
4233 bool reducing_redundancy;
4234 int i;
4235
4236 if (btrfs_fs_closing(fs_info) ||
4237 atomic_read(&fs_info->balance_pause_req) ||
4238 btrfs_should_cancel_balance(fs_info)) {
4239 ret = -EINVAL;
4240 goto out;
4241 }
4242
4243 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4244 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4245 mixed = 1;
4246
4247 /*
4248 * In case of mixed groups both data and meta should be picked,
4249 * and identical options should be given for both of them.
4250 */
4251 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4252 if (mixed && (bctl->flags & allowed)) {
4253 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4254 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4255 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4256 btrfs_err(fs_info,
4257 "balance: mixed groups data and metadata options must be the same");
4258 ret = -EINVAL;
4259 goto out;
4260 }
4261 }
4262
4263 /*
4264 * rw_devices will not change at the moment, device add/delete/replace
4265 * are exclusive
4266 */
4267 num_devices = fs_info->fs_devices->rw_devices;
4268
4269 /*
4270 * SINGLE profile on-disk has no profile bit, but in-memory we have a
4271 * special bit for it, to make it easier to distinguish. Thus we need
4272 * to set it manually, or balance would refuse the profile.
4273 */
4274 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4275 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4276 if (num_devices >= btrfs_raid_array[i].devs_min)
4277 allowed |= btrfs_raid_array[i].bg_flag;
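/*
 * E.g. with two writable devices this allows single, dup, raid0, raid1,
 * raid10 and raid5 as convert targets, while raid1c3 and raid1c4
 * (devs_min of 3 and 4 in btrfs_raid_array) remain disallowed.
 */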
4278
4279 if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4280 !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4281 !validate_convert_profile(fs_info, &bctl->sys, allowed, "system")) {
4282 ret = -EINVAL;
4283 goto out;
4284 }
4285
4286 /*
4287 * Allow to reduce metadata or system integrity only if force set for
4288 * profiles with redundancy (copies, parity)
4289 */
4290 allowed = 0;
4291 for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4292 if (btrfs_raid_array[i].ncopies >= 2 ||
4293 btrfs_raid_array[i].tolerated_failures >= 1)
4294 allowed |= btrfs_raid_array[i].bg_flag;
4295 }
4296 do {
4297 seq = read_seqbegin(&fs_info->profiles_lock);
4298
4299 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4300 (fs_info->avail_system_alloc_bits & allowed) &&
4301 !(bctl->sys.target & allowed)) ||
4302 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4303 (fs_info->avail_metadata_alloc_bits & allowed) &&
4304 !(bctl->meta.target & allowed)))
4305 reducing_redundancy = true;
4306 else
4307 reducing_redundancy = false;
4308
4309 /* if we're not converting, the target field is uninitialized */
4310 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4311 bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4312 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4313 bctl->data.target : fs_info->avail_data_alloc_bits;
4314 } while (read_seqretry(&fs_info->profiles_lock, seq));
4315
4316 if (reducing_redundancy) {
4317 if (bctl->flags & BTRFS_BALANCE_FORCE) {
4318 btrfs_info(fs_info,
4319 "balance: force reducing metadata redundancy");
4320 } else {
4321 btrfs_err(fs_info,
4322 "balance: reduces metadata redundancy, use --force if you want this");
4323 ret = -EINVAL;
4324 goto out;
4325 }
4326 }
4327
4328 if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4329 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4330 btrfs_warn(fs_info,
4331 "balance: metadata profile %s has lower redundancy than data profile %s",
4332 btrfs_bg_type_to_raid_name(meta_target),
4333 btrfs_bg_type_to_raid_name(data_target));
4334 }
4335
4336 ret = insert_balance_item(fs_info, bctl);
4337 if (ret && ret != -EEXIST)
4338 goto out;
4339
4340 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4341 BUG_ON(ret == -EEXIST);
4342 BUG_ON(fs_info->balance_ctl);
4343 spin_lock(&fs_info->balance_lock);
4344 fs_info->balance_ctl = bctl;
4345 spin_unlock(&fs_info->balance_lock);
4346 } else {
4347 BUG_ON(ret != -EEXIST);
4348 spin_lock(&fs_info->balance_lock);
4349 update_balance_args(bctl);
4350 spin_unlock(&fs_info->balance_lock);
4351 }
4352
4353 ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4354 set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4355 describe_balance_start_or_resume(fs_info);
4356 mutex_unlock(&fs_info->balance_mutex);
4357
4358 ret = __btrfs_balance(fs_info);
4359
4360 mutex_lock(&fs_info->balance_mutex);
4361 if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) {
4362 btrfs_info(fs_info, "balance: paused");
4363 btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED);
4364 }
4365 /*
4366 * Balance can be canceled by:
4367 *
4368 * - Regular cancel request
4369 * Then ret == -ECANCELED and balance_cancel_req > 0
4370 *
4371 * - Fatal signal to "btrfs" process
4372 * Either the signal caught by wait_reserve_ticket() and callers
4373 * got -EINTR, or caught by btrfs_should_cancel_balance() and
4374 * got -ECANCELED.
4375 * Either way, in this case balance_cancel_req = 0, and
4376 * ret == -EINTR or ret == -ECANCELED.
4377 *
4378 * So here we only check the return value to catch canceled balance.
4379 */
4380 else if (ret == -ECANCELED || ret == -EINTR)
4381 btrfs_info(fs_info, "balance: canceled");
4382 else
4383 btrfs_info(fs_info, "balance: ended with status: %d", ret);
4384
4385 clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4386
4387 if (bargs) {
4388 memset(bargs, 0, sizeof(*bargs));
4389 btrfs_update_ioctl_balance_args(fs_info, bargs);
4390 }
4391
4392 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4393 balance_need_close(fs_info)) {
4394 reset_balance_state(fs_info);
4395 btrfs_exclop_finish(fs_info);
4396 }
4397
4398 wake_up(&fs_info->balance_wait_q);
4399
4400 return ret;
4401 out:
4402 if (bctl->flags & BTRFS_BALANCE_RESUME)
4403 reset_balance_state(fs_info);
4404 else
4405 kfree(bctl);
4406 btrfs_exclop_finish(fs_info);
4407
4408 return ret;
4409 }
4410
4411 static int balance_kthread(void *data)
4412 {
4413 struct btrfs_fs_info *fs_info = data;
4414 int ret = 0;
4415
4416 sb_start_write(fs_info->sb);
4417 mutex_lock(&fs_info->balance_mutex);
4418 if (fs_info->balance_ctl)
4419 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4420 mutex_unlock(&fs_info->balance_mutex);
4421 sb_end_write(fs_info->sb);
4422
4423 return ret;
4424 }
4425
4426 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4427 {
4428 struct task_struct *tsk;
4429
4430 mutex_lock(&fs_info->balance_mutex);
4431 if (!fs_info->balance_ctl) {
4432 mutex_unlock(&fs_info->balance_mutex);
4433 return 0;
4434 }
4435 mutex_unlock(&fs_info->balance_mutex);
4436
4437 if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4438 btrfs_info(fs_info, "balance: resume skipped");
4439 return 0;
4440 }
4441
4442 spin_lock(&fs_info->super_lock);
4443 ASSERT(fs_info->exclusive_operation == BTRFS_EXCLOP_BALANCE_PAUSED);
4444 fs_info->exclusive_operation = BTRFS_EXCLOP_BALANCE;
4445 spin_unlock(&fs_info->super_lock);
4446 /*
4447 * A ro->rw remount sequence should continue with the paused balance
4448 * regardless of who paused it, the system or the user, so set
4449 * the resume flag.
4450 */
4451 spin_lock(&fs_info->balance_lock);
4452 fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4453 spin_unlock(&fs_info->balance_lock);
4454
4455 tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4456 return PTR_ERR_OR_ZERO(tsk);
4457 }
4458
4459 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4460 {
4461 struct btrfs_balance_control *bctl;
4462 struct btrfs_balance_item *item;
4463 struct btrfs_disk_balance_args disk_bargs;
4464 struct btrfs_path *path;
4465 struct extent_buffer *leaf;
4466 struct btrfs_key key;
4467 int ret;
4468
4469 path = btrfs_alloc_path();
4470 if (!path)
4471 return -ENOMEM;
4472
4473 key.objectid = BTRFS_BALANCE_OBJECTID;
4474 key.type = BTRFS_TEMPORARY_ITEM_KEY;
4475 key.offset = 0;
4476
4477 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4478 if (ret < 0)
4479 goto out;
4480 if (ret > 0) { /* ret = -ENOENT; */
4481 ret = 0;
4482 goto out;
4483 }
4484
4485 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4486 if (!bctl) {
4487 ret = -ENOMEM;
4488 goto out;
4489 }
4490
4491 leaf = path->nodes[0];
4492 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4493
4494 bctl->flags = btrfs_balance_flags(leaf, item);
4495 bctl->flags |= BTRFS_BALANCE_RESUME;
4496
4497 btrfs_balance_data(leaf, item, &disk_bargs);
4498 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4499 btrfs_balance_meta(leaf, item, &disk_bargs);
4500 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4501 btrfs_balance_sys(leaf, item, &disk_bargs);
4502 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4503
4504 /*
4505 * This should never happen, as the paused balance state is recovered
4506 * during mount without any chance of other exclusive ops to collide.
4507 *
4508 * This gives the exclusive op status to balance and keeps in paused
4509 * state until user intervention (cancel or umount). If the ownership
4510 * cannot be assigned, show a message but do not fail. The balance
4511 * is in a paused state and must have fs_info::balance_ctl properly
4512 * set up.
4513 */
4514 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED))
4515 btrfs_warn(fs_info,
4516 "balance: cannot set exclusive op status, resume manually");
4517
4518 btrfs_release_path(path);
4519
4520 mutex_lock(&fs_info->balance_mutex);
4521 BUG_ON(fs_info->balance_ctl);
4522 spin_lock(&fs_info->balance_lock);
4523 fs_info->balance_ctl = bctl;
4524 spin_unlock(&fs_info->balance_lock);
4525 mutex_unlock(&fs_info->balance_mutex);
4526 out:
4527 btrfs_free_path(path);
4528 return ret;
4529 }
4530
4531 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4532 {
4533 int ret = 0;
4534
4535 mutex_lock(&fs_info->balance_mutex);
4536 if (!fs_info->balance_ctl) {
4537 mutex_unlock(&fs_info->balance_mutex);
4538 return -ENOTCONN;
4539 }
4540
4541 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4542 atomic_inc(&fs_info->balance_pause_req);
4543 mutex_unlock(&fs_info->balance_mutex);
4544
4545 wait_event(fs_info->balance_wait_q,
4546 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4547
4548 mutex_lock(&fs_info->balance_mutex);
4549 /* we are good with balance_ctl ripped off from under us */
4550 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4551 atomic_dec(&fs_info->balance_pause_req);
4552 } else {
4553 ret = -ENOTCONN;
4554 }
4555
4556 mutex_unlock(&fs_info->balance_mutex);
4557 return ret;
4558 }
4559
4560 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4561 {
4562 mutex_lock(&fs_info->balance_mutex);
4563 if (!fs_info->balance_ctl) {
4564 mutex_unlock(&fs_info->balance_mutex);
4565 return -ENOTCONN;
4566 }
4567
4568 /*
4569 * A paused balance with the item stored on disk can be resumed at
4570 * mount time if the mount is read-write. Otherwise it's still paused
4571 * and we must not allow cancelling as it deletes the item.
4572 */
4573 if (sb_rdonly(fs_info->sb)) {
4574 mutex_unlock(&fs_info->balance_mutex);
4575 return -EROFS;
4576 }
4577
4578 atomic_inc(&fs_info->balance_cancel_req);
4579 /*
4580 * If balance is running, just wait and return; the balance item
4581 * is deleted in btrfs_balance() in that case.
4582 */
4583 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4584 mutex_unlock(&fs_info->balance_mutex);
4585 wait_event(fs_info->balance_wait_q,
4586 !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4587 mutex_lock(&fs_info->balance_mutex);
4588 } else {
4589 mutex_unlock(&fs_info->balance_mutex);
4590 /*
4591 * Lock released to allow other waiters to continue, we'll
4592 * reexamine the status again.
4593 */
4594 mutex_lock(&fs_info->balance_mutex);
4595
4596 if (fs_info->balance_ctl) {
4597 reset_balance_state(fs_info);
4598 btrfs_exclop_finish(fs_info);
4599 btrfs_info(fs_info, "balance: canceled");
4600 }
4601 }
4602
4603 BUG_ON(fs_info->balance_ctl ||
4604 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4605 atomic_dec(&fs_info->balance_cancel_req);
4606 mutex_unlock(&fs_info->balance_mutex);
4607 return 0;
4608 }
4609
4610 int btrfs_uuid_scan_kthread(void *data)
4611 {
4612 struct btrfs_fs_info *fs_info = data;
4613 struct btrfs_root *root = fs_info->tree_root;
4614 struct btrfs_key key;
4615 struct btrfs_path *path = NULL;
4616 int ret = 0;
4617 struct extent_buffer *eb;
4618 int slot;
4619 struct btrfs_root_item root_item;
4620 u32 item_size;
4621 struct btrfs_trans_handle *trans = NULL;
4622 bool closing = false;
4623
4624 path = btrfs_alloc_path();
4625 if (!path) {
4626 ret = -ENOMEM;
4627 goto out;
4628 }
4629
4630 key.objectid = 0;
4631 key.type = BTRFS_ROOT_ITEM_KEY;
4632 key.offset = 0;
4633
4634 while (1) {
4635 if (btrfs_fs_closing(fs_info)) {
4636 closing = true;
4637 break;
4638 }
4639 ret = btrfs_search_forward(root, &key, path,
4640 BTRFS_OLDEST_GENERATION);
4641 if (ret) {
4642 if (ret > 0)
4643 ret = 0;
4644 break;
4645 }
4646
4647 if (key.type != BTRFS_ROOT_ITEM_KEY ||
4648 (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4649 key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4650 key.objectid > BTRFS_LAST_FREE_OBJECTID)
4651 goto skip;
4652
4653 eb = path->nodes[0];
4654 slot = path->slots[0];
4655 item_size = btrfs_item_size(eb, slot);
4656 if (item_size < sizeof(root_item))
4657 goto skip;
4658
4659 read_extent_buffer(eb, &root_item,
4660 btrfs_item_ptr_offset(eb, slot),
4661 (int)sizeof(root_item));
4662 if (btrfs_root_refs(&root_item) == 0)
4663 goto skip;
4664
4665 if (!btrfs_is_empty_uuid(root_item.uuid) ||
4666 !btrfs_is_empty_uuid(root_item.received_uuid)) {
4667 if (trans)
4668 goto update_tree;
4669
4670 btrfs_release_path(path);
4671 /*
4672 * 1 - subvol uuid item
4673 * 1 - received_subvol uuid item
4674 */
4675 trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4676 if (IS_ERR(trans)) {
4677 ret = PTR_ERR(trans);
4678 break;
4679 }
4680 continue;
4681 } else {
4682 goto skip;
4683 }
4684 update_tree:
4685 btrfs_release_path(path);
4686 if (!btrfs_is_empty_uuid(root_item.uuid)) {
4687 ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4688 BTRFS_UUID_KEY_SUBVOL,
4689 key.objectid);
4690 if (ret < 0) {
4691 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4692 ret);
4693 break;
4694 }
4695 }
4696
4697 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4698 ret = btrfs_uuid_tree_add(trans,
4699 root_item.received_uuid,
4700 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4701 key.objectid);
4702 if (ret < 0) {
4703 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4704 ret);
4705 break;
4706 }
4707 }
4708
4709 skip:
4710 btrfs_release_path(path);
4711 if (trans) {
4712 ret = btrfs_end_transaction(trans);
4713 trans = NULL;
4714 if (ret)
4715 break;
4716 }
4717
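/*
 * Advance to the next possible key in tree order: bump the offset first,
 * then wrap to the next type, then to the next objectid, and stop once
 * the whole key space has been covered.
 */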
4718 if (key.offset < (u64)-1) {
4719 key.offset++;
4720 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4721 key.offset = 0;
4722 key.type = BTRFS_ROOT_ITEM_KEY;
4723 } else if (key.objectid < (u64)-1) {
4724 key.offset = 0;
4725 key.type = BTRFS_ROOT_ITEM_KEY;
4726 key.objectid++;
4727 } else {
4728 break;
4729 }
4730 cond_resched();
4731 }
4732
4733 out:
4734 btrfs_free_path(path);
4735 if (trans && !IS_ERR(trans))
4736 btrfs_end_transaction(trans);
4737 if (ret)
4738 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4739 else if (!closing)
4740 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4741 up(&fs_info->uuid_tree_rescan_sem);
4742 return 0;
4743 }
4744
4745 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4746 {
4747 struct btrfs_trans_handle *trans;
4748 struct btrfs_root *tree_root = fs_info->tree_root;
4749 struct btrfs_root *uuid_root;
4750 struct task_struct *task;
4751 int ret;
4752
4753 /*
4754 * 1 - root node
4755 * 1 - root item
4756 */
4757 trans = btrfs_start_transaction(tree_root, 2);
4758 if (IS_ERR(trans))
4759 return PTR_ERR(trans);
4760
4761 uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4762 if (IS_ERR(uuid_root)) {
4763 ret = PTR_ERR(uuid_root);
4764 btrfs_abort_transaction(trans, ret);
4765 btrfs_end_transaction(trans);
4766 return ret;
4767 }
4768
4769 fs_info->uuid_root = uuid_root;
4770
4771 ret = btrfs_commit_transaction(trans);
4772 if (ret)
4773 return ret;
4774
4775 down(&fs_info->uuid_tree_rescan_sem);
4776 task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4777 if (IS_ERR(task)) {
4778 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4779 btrfs_warn(fs_info, "failed to start uuid_scan task");
4780 up(&fs_info->uuid_tree_rescan_sem);
4781 return PTR_ERR(task);
4782 }
4783
4784 return 0;
4785 }
4786
4787 /*
4788 * shrinking a device means finding all of the device extents past
4789 * the new size, and then following the back refs to the chunks.
4790 * The chunk relocation code actually frees the device extent
4791 */
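/*
 * In outline: shrink the in-memory size first, relocate every dev extent
 * that lies beyond the new size (walking from the highest offset down),
 * retry once for -ENOSPC failures, and only then commit the new
 * disk_total_bytes and super block total.
 */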
4792 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4793 {
4794 struct btrfs_fs_info *fs_info = device->fs_info;
4795 struct btrfs_root *root = fs_info->dev_root;
4796 struct btrfs_trans_handle *trans;
4797 struct btrfs_dev_extent *dev_extent = NULL;
4798 struct btrfs_path *path;
4799 u64 length;
4800 u64 chunk_offset;
4801 int ret;
4802 int slot;
4803 int failed = 0;
4804 bool retried = false;
4805 struct extent_buffer *l;
4806 struct btrfs_key key;
4807 struct btrfs_super_block *super_copy = fs_info->super_copy;
4808 u64 old_total = btrfs_super_total_bytes(super_copy);
4809 u64 old_size = btrfs_device_get_total_bytes(device);
4810 u64 diff;
4811 u64 start;
4812
4813 new_size = round_down(new_size, fs_info->sectorsize);
4814 start = new_size;
4815 diff = round_down(old_size - new_size, fs_info->sectorsize);
4816
4817 if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4818 return -EINVAL;
4819
4820 path = btrfs_alloc_path();
4821 if (!path)
4822 return -ENOMEM;
4823
4824 path->reada = READA_BACK;
4825
4826 trans = btrfs_start_transaction(root, 0);
4827 if (IS_ERR(trans)) {
4828 btrfs_free_path(path);
4829 return PTR_ERR(trans);
4830 }
4831
4832 mutex_lock(&fs_info->chunk_mutex);
4833
4834 btrfs_device_set_total_bytes(device, new_size);
4835 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4836 device->fs_devices->total_rw_bytes -= diff;
4837 atomic64_sub(diff, &fs_info->free_chunk_space);
4838 }
4839
4840 /*
4841 * Once the device's size has been set to the new size, ensure all
4842 * in-memory chunks are synced to disk so that the loop below sees them
4843 * and relocates them accordingly.
4844 */
4845 if (contains_pending_extent(device, &start, diff)) {
4846 mutex_unlock(&fs_info->chunk_mutex);
4847 ret = btrfs_commit_transaction(trans);
4848 if (ret)
4849 goto done;
4850 } else {
4851 mutex_unlock(&fs_info->chunk_mutex);
4852 btrfs_end_transaction(trans);
4853 }
4854
4855 again:
4856 key.objectid = device->devid;
4857 key.offset = (u64)-1;
4858 key.type = BTRFS_DEV_EXTENT_KEY;
4859
4860 do {
4861 mutex_lock(&fs_info->reclaim_bgs_lock);
4862 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4863 if (ret < 0) {
4864 mutex_unlock(&fs_info->reclaim_bgs_lock);
4865 goto done;
4866 }
4867
4868 ret = btrfs_previous_item(root, path, 0, key.type);
4869 if (ret) {
4870 mutex_unlock(&fs_info->reclaim_bgs_lock);
4871 if (ret < 0)
4872 goto done;
4873 ret = 0;
4874 btrfs_release_path(path);
4875 break;
4876 }
4877
4878 l = path->nodes[0];
4879 slot = path->slots[0];
4880 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4881
4882 if (key.objectid != device->devid) {
4883 mutex_unlock(&fs_info->reclaim_bgs_lock);
4884 btrfs_release_path(path);
4885 break;
4886 }
4887
4888 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4889 length = btrfs_dev_extent_length(l, dev_extent);
4890
4891 if (key.offset + length <= new_size) {
4892 mutex_unlock(&fs_info->reclaim_bgs_lock);
4893 btrfs_release_path(path);
4894 break;
4895 }
4896
4897 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4898 btrfs_release_path(path);
4899
4900 /*
4901 * We may be relocating the only data chunk we have,
4902 * which could potentially end up losing the data
4903 * raid profile, so let's allocate an empty one in
4904 * advance.
4905 */
4906 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4907 if (ret < 0) {
4908 mutex_unlock(&fs_info->reclaim_bgs_lock);
4909 goto done;
4910 }
4911
4912 ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4913 mutex_unlock(&fs_info->reclaim_bgs_lock);
4914 if (ret == -ENOSPC) {
4915 failed++;
4916 } else if (ret) {
4917 if (ret == -ETXTBSY) {
4918 btrfs_warn(fs_info,
4919 "could not shrink block group %llu due to active swapfile",
4920 chunk_offset);
4921 }
4922 goto done;
4923 }
4924 } while (key.offset-- > 0);
4925
4926 if (failed && !retried) {
4927 failed = 0;
4928 retried = true;
4929 goto again;
4930 } else if (failed && retried) {
4931 ret = -ENOSPC;
4932 goto done;
4933 }
4934
4935 /* Shrinking succeeded, else we would be at "done". */
4936 trans = btrfs_start_transaction(root, 0);
4937 if (IS_ERR(trans)) {
4938 ret = PTR_ERR(trans);
4939 goto done;
4940 }
4941
4942 mutex_lock(&fs_info->chunk_mutex);
4943 /* Clear all state bits beyond the shrunk device size */
4944 clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4945 CHUNK_STATE_MASK);
4946
4947 btrfs_device_set_disk_total_bytes(device, new_size);
4948 if (list_empty(&device->post_commit_list))
4949 list_add_tail(&device->post_commit_list,
4950 &trans->transaction->dev_update_list);
4951
4952 WARN_ON(diff > old_total);
4953 btrfs_set_super_total_bytes(super_copy,
4954 round_down(old_total - diff, fs_info->sectorsize));
4955 mutex_unlock(&fs_info->chunk_mutex);
4956
4957 btrfs_reserve_chunk_metadata(trans, false);
4958 /* Now btrfs_update_device() will change the on-disk size. */
4959 ret = btrfs_update_device(trans, device);
4960 btrfs_trans_release_chunk_metadata(trans);
4961 if (ret < 0) {
4962 btrfs_abort_transaction(trans, ret);
4963 btrfs_end_transaction(trans);
4964 } else {
4965 ret = btrfs_commit_transaction(trans);
4966 }
4967 done:
4968 btrfs_free_path(path);
4969 if (ret) {
4970 mutex_lock(&fs_info->chunk_mutex);
4971 btrfs_device_set_total_bytes(device, old_size);
4972 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4973 device->fs_devices->total_rw_bytes += diff;
4974 atomic64_add(diff, &fs_info->free_chunk_space);
4975 mutex_unlock(&fs_info->chunk_mutex);
4976 }
4977 return ret;
4978 }
4979
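/*
 * The superblock's sys_chunk_array is a packed sequence of
 * (struct btrfs_disk_key, chunk item) pairs; appending past
 * BTRFS_SYSTEM_CHUNK_ARRAY_SIZE fails with -EFBIG.
 */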
4980 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4981 struct btrfs_key *key,
4982 struct btrfs_chunk *chunk, int item_size)
4983 {
4984 struct btrfs_super_block *super_copy = fs_info->super_copy;
4985 struct btrfs_disk_key disk_key;
4986 u32 array_size;
4987 u8 *ptr;
4988
4989 lockdep_assert_held(&fs_info->chunk_mutex);
4990
4991 array_size = btrfs_super_sys_array_size(super_copy);
4992 if (array_size + item_size + sizeof(disk_key)
4993 > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
4994 return -EFBIG;
4995
4996 ptr = super_copy->sys_chunk_array + array_size;
4997 btrfs_cpu_key_to_disk(&disk_key, key);
4998 memcpy(ptr, &disk_key, sizeof(disk_key));
4999 ptr += sizeof(disk_key);
5000 memcpy(ptr, chunk, item_size);
5001 item_size += sizeof(disk_key);
5002 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
5003
5004 return 0;
5005 }
5006
5007 /*
5008 * sort the devices in descending order by max_avail, total_avail
5009 */
5010 static int btrfs_cmp_device_info(const void *a, const void *b)
5011 {
5012 const struct btrfs_device_info *di_a = a;
5013 const struct btrfs_device_info *di_b = b;
5014
5015 if (di_a->max_avail > di_b->max_avail)
5016 return -1;
5017 if (di_a->max_avail < di_b->max_avail)
5018 return 1;
5019 if (di_a->total_avail > di_b->total_avail)
5020 return -1;
5021 if (di_a->total_avail < di_b->total_avail)
5022 return 1;
5023 return 0;
5024 }
5025
5026 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
5027 {
5028 if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
5029 return;
5030
5031 btrfs_set_fs_incompat(info, RAID56);
5032 }
5033
5034 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
5035 {
5036 if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
5037 return;
5038
5039 btrfs_set_fs_incompat(info, RAID1C34);
5040 }
5041
5042 /*
5043 * Structure used internally for btrfs_create_chunk() function.
5044 * Wraps needed parameters.
5045 */
5046 struct alloc_chunk_ctl {
5047 u64 start;
5048 u64 type;
5049 /* Total number of stripes to allocate */
5050 int num_stripes;
5051 /* sub_stripes info for map */
5052 int sub_stripes;
5053 /* Stripes per device */
5054 int dev_stripes;
5055 /* Maximum number of devices to use */
5056 int devs_max;
5057 /* Minimum number of devices to use */
5058 int devs_min;
5059 /* ndevs has to be a multiple of this */
5060 int devs_increment;
5061 /* Number of copies */
5062 int ncopies;
5063 /* Number of stripes worth of bytes to store parity information */
5064 int nparity;
5065 u64 max_stripe_size;
5066 u64 max_chunk_size;
5067 u64 dev_extent_min;
5068 u64 stripe_size;
5069 u64 chunk_size;
5070 int ndevs;
5071 };
5072
5073 static void init_alloc_chunk_ctl_policy_regular(
5074 struct btrfs_fs_devices *fs_devices,
5075 struct alloc_chunk_ctl *ctl)
5076 {
5077 struct btrfs_space_info *space_info;
5078
5079 space_info = btrfs_find_space_info(fs_devices->fs_info, ctl->type);
5080 ASSERT(space_info);
5081
5082 ctl->max_chunk_size = READ_ONCE(space_info->chunk_size);
5083 ctl->max_stripe_size = ctl->max_chunk_size;
5084
5085 if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM)
5086 ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
5087
5088 /* We don't want a chunk larger than 10% of writable space */
5089 ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
5090 ctl->max_chunk_size);
5091 ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
5092 }
5093
5094 static void init_alloc_chunk_ctl_policy_zoned(
5095 struct btrfs_fs_devices *fs_devices,
5096 struct alloc_chunk_ctl *ctl)
5097 {
5098 u64 zone_size = fs_devices->fs_info->zone_size;
5099 u64 limit;
5100 int min_num_stripes = ctl->devs_min * ctl->dev_stripes;
5101 int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies;
5102 u64 min_chunk_size = min_data_stripes * zone_size;
5103 u64 type = ctl->type;
5104
5105 ctl->max_stripe_size = zone_size;
5106 if (type & BTRFS_BLOCK_GROUP_DATA) {
5107 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE,
5108 zone_size);
5109 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
5110 ctl->max_chunk_size = ctl->max_stripe_size;
5111 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
5112 ctl->max_chunk_size = 2 * ctl->max_stripe_size;
5113 ctl->devs_max = min_t(int, ctl->devs_max,
5114 BTRFS_MAX_DEVS_SYS_CHUNK);
5115 } else {
5116 BUG();
5117 }
5118
5119 /* We don't want a chunk larger than 10% of writable space */
5120 limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1),
5121 zone_size),
5122 min_chunk_size);
5123 ctl->max_chunk_size = min(limit, ctl->max_chunk_size);
5124 ctl->dev_extent_min = zone_size * ctl->dev_stripes;
5125 }
5126
5127 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
5128 struct alloc_chunk_ctl *ctl)
5129 {
5130 int index = btrfs_bg_flags_to_raid_index(ctl->type);
5131
5132 ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
5133 ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
5134 ctl->devs_max = btrfs_raid_array[index].devs_max;
5135 if (!ctl->devs_max)
5136 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
5137 ctl->devs_min = btrfs_raid_array[index].devs_min;
5138 ctl->devs_increment = btrfs_raid_array[index].devs_increment;
5139 ctl->ncopies = btrfs_raid_array[index].ncopies;
5140 ctl->nparity = btrfs_raid_array[index].nparity;
5141 ctl->ndevs = 0;
5142
5143 switch (fs_devices->chunk_alloc_policy) {
5144 case BTRFS_CHUNK_ALLOC_REGULAR:
5145 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
5146 break;
5147 case BTRFS_CHUNK_ALLOC_ZONED:
5148 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
5149 break;
5150 default:
5151 BUG();
5152 }
5153 }
5154
5155 static int gather_device_info(struct btrfs_fs_devices *fs_devices,
5156 struct alloc_chunk_ctl *ctl,
5157 struct btrfs_device_info *devices_info)
5158 {
5159 struct btrfs_fs_info *info = fs_devices->fs_info;
5160 struct btrfs_device *device;
5161 u64 total_avail;
5162 u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
5163 int ret;
5164 int ndevs = 0;
5165 u64 max_avail;
5166 u64 dev_offset;
5167
5168 /*
5169 * in the first pass through the devices list, we gather information
5170 * about the available holes on each device.
5171 */
5172 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
5173 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5174 WARN(1, KERN_ERR
5175 "BTRFS: read-only device in alloc_list\n");
5176 continue;
5177 }
5178
5179 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
5180 &device->dev_state) ||
5181 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
5182 continue;
5183
5184 if (device->total_bytes > device->bytes_used)
5185 total_avail = device->total_bytes - device->bytes_used;
5186 else
5187 total_avail = 0;
5188
5189 /* If there is no space on this device, skip it. */
5190 if (total_avail < ctl->dev_extent_min)
5191 continue;
5192
5193 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
5194 &max_avail);
5195 if (ret && ret != -ENOSPC)
5196 return ret;
5197
5198 if (ret == 0)
5199 max_avail = dev_extent_want;
5200
5201 if (max_avail < ctl->dev_extent_min) {
5202 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5203 btrfs_debug(info,
5204 "%s: devid %llu has no free space, have=%llu want=%llu",
5205 __func__, device->devid, max_avail,
5206 ctl->dev_extent_min);
5207 continue;
5208 }
5209
5210 if (ndevs == fs_devices->rw_devices) {
5211 WARN(1, "%s: found more than %llu devices\n",
5212 __func__, fs_devices->rw_devices);
5213 break;
5214 }
5215 devices_info[ndevs].dev_offset = dev_offset;
5216 devices_info[ndevs].max_avail = max_avail;
5217 devices_info[ndevs].total_avail = total_avail;
5218 devices_info[ndevs].dev = device;
5219 ++ndevs;
5220 }
5221 ctl->ndevs = ndevs;
5222
5223 /*
5224 * now sort the devices by hole size / available space
5225 */
5226 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
5227 btrfs_cmp_device_info, NULL);
5228
5229 return 0;
5230 }
5231
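/*
 * A worked example for the regular policy: a single-device data chunk
 * (dev_stripes=1, ncopies=1) with max_avail=100G and max_chunk_size=10G
 * starts with stripe_size=100G, gets reduced to round_up(10G / 1, 16M) =
 * 10G, and is then capped at 1G, giving a 1G chunk.
 */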
5232 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
5233 struct btrfs_device_info *devices_info)
5234 {
5235 /* Number of stripes that count for block group size */
5236 int data_stripes;
5237
5238 /*
5239 * The primary goal is to maximize the number of stripes, so use as
5240 * many devices as possible, even if the stripes are not maximum sized.
5241 *
5242 * The DUP profile stores more than one stripe per device, the
5243 * max_avail is the total size so we have to adjust.
5244 */
5245 ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5246 ctl->dev_stripes);
5247 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5248
5249 /* This will have to be fixed for RAID1 and RAID10 over more drives */
5250 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5251
5252 /*
5253 * Use the number of data stripes to figure out how big this chunk is
5254 * really going to be in terms of logical address space, and compare
5255 * that answer with the max chunk size. If it's higher, we try to
5256 * reduce stripe_size.
5257 */
5258 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5259 /*
5260 * Reduce stripe_size, round it up to a 16MB boundary again and
5261 * then use it, unless it ends up being even bigger than the
5262 * previous value we had already.
5263 */
5264 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5265 data_stripes), SZ_16M),
5266 ctl->stripe_size);
5267 }
5268
5269 /* Stripe size should not go beyond 1G. */
5270 ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G);
5271
5272 /* Align to BTRFS_STRIPE_LEN */
5273 ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5274 ctl->chunk_size = ctl->stripe_size * data_stripes;
5275
5276 return 0;
5277 }
5278
5279 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
5280 struct btrfs_device_info *devices_info)
5281 {
5282 u64 zone_size = devices_info[0].dev->zone_info->zone_size;
5283 /* Number of stripes that count for block group size */
5284 int data_stripes;
5285
5286 /*
5287 * It should hold because:
5288 * dev_extent_min == dev_extent_want == zone_size * dev_stripes
5289 */
5290 ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
5291
5292 ctl->stripe_size = zone_size;
5293 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5294 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5295
5296 /* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */
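/*
 * The reduced ndevs below follows from solving
 * max_chunk_size >= stripe_size * (ndevs * dev_stripes - nparity) / ncopies
 * for ndevs, with stripe_size fixed at the zone size.
 */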
5297 if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5298 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
5299 ctl->stripe_size) + ctl->nparity,
5300 ctl->dev_stripes);
5301 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5302 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5303 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
5304 }
5305
5306 ctl->chunk_size = ctl->stripe_size * data_stripes;
5307
5308 return 0;
5309 }
5310
5311 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5312 struct alloc_chunk_ctl *ctl,
5313 struct btrfs_device_info *devices_info)
5314 {
5315 struct btrfs_fs_info *info = fs_devices->fs_info;
5316
5317 /*
5318 * Round down to number of usable stripes, devs_increment can be any
5319 * number so we can't use round_down() that requires power of 2, while
5320 * rounddown is safe.
5321 */
5322 ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
5323
5324 if (ctl->ndevs < ctl->devs_min) {
5325 if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5326 btrfs_debug(info,
5327 "%s: not enough devices with free space: have=%d minimum required=%d",
5328 __func__, ctl->ndevs, ctl->devs_min);
5329 }
5330 return -ENOSPC;
5331 }
5332
5333 ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5334
5335 switch (fs_devices->chunk_alloc_policy) {
5336 case BTRFS_CHUNK_ALLOC_REGULAR:
5337 return decide_stripe_size_regular(ctl, devices_info);
5338 case BTRFS_CHUNK_ALLOC_ZONED:
5339 return decide_stripe_size_zoned(ctl, devices_info);
5340 default:
5341 BUG();
5342 }
5343 }
5344
5345 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
5346 struct alloc_chunk_ctl *ctl,
5347 struct btrfs_device_info *devices_info)
5348 {
5349 struct btrfs_fs_info *info = trans->fs_info;
5350 struct map_lookup *map = NULL;
5351 struct extent_map_tree *em_tree;
5352 struct btrfs_block_group *block_group;
5353 struct extent_map *em;
5354 u64 start = ctl->start;
5355 u64 type = ctl->type;
5356 int ret;
5357 int i;
5358 int j;
5359
5360 map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
5361 if (!map)
5362 return ERR_PTR(-ENOMEM);
5363 map->num_stripes = ctl->num_stripes;
5364
5365 for (i = 0; i < ctl->ndevs; ++i) {
5366 for (j = 0; j < ctl->dev_stripes; ++j) {
5367 int s = i * ctl->dev_stripes + j;
5368 map->stripes[s].dev = devices_info[i].dev;
5369 map->stripes[s].physical = devices_info[i].dev_offset +
5370 j * ctl->stripe_size;
5371 }
5372 }
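/*
 * Example of the resulting layout (illustrative): for DUP, ndevs = 1 and
 * dev_stripes = 2, so stripes s = 0 and s = 1 both live on
 * devices_info[0].dev, at dev_offset and dev_offset + stripe_size
 * respectively.
 */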
5373 map->stripe_len = BTRFS_STRIPE_LEN;
5374 map->io_align = BTRFS_STRIPE_LEN;
5375 map->io_width = BTRFS_STRIPE_LEN;
5376 map->type = type;
5377 map->sub_stripes = ctl->sub_stripes;
5378
5379 trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5380
5381 em = alloc_extent_map();
5382 if (!em) {
5383 kfree(map);
5384 return ERR_PTR(-ENOMEM);
5385 }
5386 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5387 em->map_lookup = map;
5388 em->start = start;
5389 em->len = ctl->chunk_size;
5390 em->block_start = 0;
5391 em->block_len = em->len;
5392 em->orig_block_len = ctl->stripe_size;
5393
5394 em_tree = &info->mapping_tree;
5395 write_lock(&em_tree->lock);
5396 ret = add_extent_mapping(em_tree, em, 0);
5397 if (ret) {
5398 write_unlock(&em_tree->lock);
5399 free_extent_map(em);
5400 return ERR_PTR(ret);
5401 }
5402 write_unlock(&em_tree->lock);
5403
5404 block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
5405 if (IS_ERR(block_group))
5406 goto error_del_extent;
5407
5408 for (i = 0; i < map->num_stripes; i++) {
5409 struct btrfs_device *dev = map->stripes[i].dev;
5410
5411 btrfs_device_set_bytes_used(dev,
5412 dev->bytes_used + ctl->stripe_size);
5413 if (list_empty(&dev->post_commit_list))
5414 list_add_tail(&dev->post_commit_list,
5415 &trans->transaction->dev_update_list);
5416 }
5417
5418 atomic64_sub(ctl->stripe_size * map->num_stripes,
5419 &info->free_chunk_space);
5420
5421 free_extent_map(em);
5422 check_raid56_incompat_flag(info, type);
5423 check_raid1c34_incompat_flag(info, type);
5424
5425 return block_group;
5426
5427 error_del_extent:
5428 write_lock(&em_tree->lock);
5429 remove_extent_mapping(em_tree, em);
5430 write_unlock(&em_tree->lock);
5431
5432 /* One for our allocation */
5433 free_extent_map(em);
5434 /* One for the tree reference */
5435 free_extent_map(em);
5436
5437 return block_group;
5438 }
5439
5440 struct btrfs_block_group *btrfs_create_chunk(struct btrfs_trans_handle *trans,
5441 u64 type)
5442 {
5443 struct btrfs_fs_info *info = trans->fs_info;
5444 struct btrfs_fs_devices *fs_devices = info->fs_devices;
5445 struct btrfs_device_info *devices_info = NULL;
5446 struct alloc_chunk_ctl ctl;
5447 struct btrfs_block_group *block_group;
5448 int ret;
5449
5450 lockdep_assert_held(&info->chunk_mutex);
5451
5452 if (!alloc_profile_is_valid(type, 0)) {
5453 ASSERT(0);
5454 return ERR_PTR(-EINVAL);
5455 }
5456
5457 if (list_empty(&fs_devices->alloc_list)) {
5458 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5459 btrfs_debug(info, "%s: no writable device", __func__);
5460 return ERR_PTR(-ENOSPC);
5461 }
5462
5463 if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5464 btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5465 ASSERT(0);
5466 return ERR_PTR(-EINVAL);
5467 }
5468
5469 ctl.start = find_next_chunk(info);
5470 ctl.type = type;
5471 init_alloc_chunk_ctl(fs_devices, &ctl);
5472
5473 devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5474 GFP_NOFS);
5475 if (!devices_info)
5476 return ERR_PTR(-ENOMEM);
5477
5478 ret = gather_device_info(fs_devices, &ctl, devices_info);
5479 if (ret < 0) {
5480 block_group = ERR_PTR(ret);
5481 goto out;
5482 }
5483
5484 ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5485 if (ret < 0) {
5486 block_group = ERR_PTR(ret);
5487 goto out;
5488 }
5489
5490 block_group = create_chunk(trans, &ctl, devices_info);
5491
5492 out:
5493 kfree(devices_info);
5494 return block_group;
5495 }
5496
5497 /*
5498 * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
5499 * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system
5500 * chunks.
5501 *
5502 * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
5503 * phases.
5504 */
5505 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
5506 struct btrfs_block_group *bg)
5507 {
5508 struct btrfs_fs_info *fs_info = trans->fs_info;
5509 struct btrfs_root *chunk_root = fs_info->chunk_root;
5510 struct btrfs_key key;
5511 struct btrfs_chunk *chunk;
5512 struct btrfs_stripe *stripe;
5513 struct extent_map *em;
5514 struct map_lookup *map;
5515 size_t item_size;
5516 int i;
5517 int ret;
5518
5519 /*
5520 * We take the chunk_mutex for 2 reasons:
5521 *
5522 * 1) Updates and insertions in the chunk btree must be done while holding
5523 * the chunk_mutex, as well as updating the system chunk array in the
5524 * superblock. See the comment on top of btrfs_chunk_alloc() for the
5525 * details;
5526 *
5527 * 2) To prevent races with the final phase of a device replace operation
5528 * that replaces the device object associated with the map's stripes,
5529 * because the device object's id can change at any time during that
5530 * final phase of the device replace operation
5531 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
5532 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
5533 * which would cause a failure when updating the device item, which does
5534 * not exist, or persisting a stripe of the chunk item with such an ID.
5535 * Here we can't use the device_list_mutex because our caller already
5536 * has locked the chunk_mutex, and the final phase of device replace
5537 * acquires both mutexes - first the device_list_mutex and then the
5538 * chunk_mutex. Using any of those two mutexes protects us from a
5539 * concurrent device replace.
5540 */
5541 lockdep_assert_held(&fs_info->chunk_mutex);
5542
5543 em = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
5544 if (IS_ERR(em)) {
5545 ret = PTR_ERR(em);
5546 btrfs_abort_transaction(trans, ret);
5547 return ret;
5548 }
5549
5550 map = em->map_lookup;
5551 item_size = btrfs_chunk_item_size(map->num_stripes);
5552
5553 chunk = kzalloc(item_size, GFP_NOFS);
5554 if (!chunk) {
5555 ret = -ENOMEM;
5556 btrfs_abort_transaction(trans, ret);
5557 goto out;
5558 }
5559
5560 for (i = 0; i < map->num_stripes; i++) {
5561 struct btrfs_device *device = map->stripes[i].dev;
5562
5563 ret = btrfs_update_device(trans, device);
5564 if (ret)
5565 goto out;
5566 }
5567
5568 stripe = &chunk->stripe;
5569 for (i = 0; i < map->num_stripes; i++) {
5570 struct btrfs_device *device = map->stripes[i].dev;
5571 const u64 dev_offset = map->stripes[i].physical;
5572
5573 btrfs_set_stack_stripe_devid(stripe, device->devid);
5574 btrfs_set_stack_stripe_offset(stripe, dev_offset);
5575 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5576 stripe++;
5577 }
5578
5579 btrfs_set_stack_chunk_length(chunk, bg->length);
5580 btrfs_set_stack_chunk_owner(chunk, BTRFS_EXTENT_TREE_OBJECTID);
5581 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5582 btrfs_set_stack_chunk_type(chunk, map->type);
5583 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5584 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5585 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5586 btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5587 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5588
5589 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5590 key.type = BTRFS_CHUNK_ITEM_KEY;
5591 key.offset = bg->start;
5592
5593 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5594 if (ret)
5595 goto out;
5596
5597 bg->chunk_item_inserted = 1;
5598
5599 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5600 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5601 if (ret)
5602 goto out;
5603 }
5604
5605 out:
5606 kfree(chunk);
5607 free_extent_map(em);
5608 return ret;
5609 }
5610
5611 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5612 {
5613 struct btrfs_fs_info *fs_info = trans->fs_info;
5614 u64 alloc_profile;
5615 struct btrfs_block_group *meta_bg;
5616 struct btrfs_block_group *sys_bg;
5617
5618 /*
5619 * When adding a new device for sprouting, the seed device is read-only
5620 * so we must first allocate a metadata and a system chunk. But before
5621 * adding the block group items to the extent, device and chunk btrees,
5622 * we must first:
5623 *
5624 * 1) Create both chunks without doing any changes to the btrees, as
5625 * otherwise we would get -ENOSPC since the block groups from the
5626 * seed device are read-only;
5627 *
5628 * 2) Add the device item for the new sprout device - finishing the setup
5629 * of a new block group requires updating the device item in the chunk
5630 * btree, so it must exist when we attempt to do it. The previous step
5631 * ensures this does not fail with -ENOSPC.
5632 *
5633 * After that we can add the block group items to their btrees:
5634 * update existing device item in the chunk btree, add a new block group
5635 * item to the extent btree, add a new chunk item to the chunk btree and
5636 * finally add the new device extent items to the devices btree.
5637 */
5638
5639 alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5640 meta_bg = btrfs_create_chunk(trans, alloc_profile);
5641 if (IS_ERR(meta_bg))
5642 return PTR_ERR(meta_bg);
5643
5644 alloc_profile = btrfs_system_alloc_profile(fs_info);
5645 sys_bg = btrfs_create_chunk(trans, alloc_profile);
5646 if (IS_ERR(sys_bg))
5647 return PTR_ERR(sys_bg);
5648
5649 return 0;
5650 }
5651
5652 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5653 {
5654 const int index = btrfs_bg_flags_to_raid_index(map->type);
5655
5656 return btrfs_raid_array[index].tolerated_failures;
5657 }
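/*
 * E.g. RAID6 tolerates 2 failed devices, RAID1/RAID10/RAID5 tolerate 1,
 * and SINGLE/DUP/RAID0 tolerate none (see btrfs_raid_array).
 */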
5658
5659 bool btrfs_chunk_writeable(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5660 {
5661 struct extent_map *em;
5662 struct map_lookup *map;
5663 int miss_ndevs = 0;
5664 int i;
5665 bool ret = true;
5666
5667 em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5668 if (IS_ERR(em))
5669 return false;
5670
5671 map = em->map_lookup;
5672 for (i = 0; i < map->num_stripes; i++) {
5673 if (test_bit(BTRFS_DEV_STATE_MISSING,
5674 &map->stripes[i].dev->dev_state)) {
5675 miss_ndevs++;
5676 continue;
5677 }
5678 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5679 &map->stripes[i].dev->dev_state)) {
5680 ret = false;
5681 goto end;
5682 }
5683 }
5684
5685 /*
5686 * If the number of missing devices is larger than max errors, we
5687 * cannot write the data into that chunk successfully.
5688 */
5689 if (miss_ndevs > btrfs_chunk_max_errors(map))
5690 ret = false;
5691 end:
5692 free_extent_map(em);
5693 return ret;
5694 }
5695
5696 void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5697 {
5698 struct extent_map *em;
5699
5700 while (1) {
5701 write_lock(&tree->lock);
5702 em = lookup_extent_mapping(tree, 0, (u64)-1);
5703 if (em)
5704 remove_extent_mapping(tree, em);
5705 write_unlock(&tree->lock);
5706 if (!em)
5707 break;
5708 /* once for us */
5709 free_extent_map(em);
5710 /* once for the tree */
5711 free_extent_map(em);
5712 }
5713 }
5714
5715 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5716 {
5717 struct extent_map *em;
5718 struct map_lookup *map;
5719 int ret;
5720
5721 em = btrfs_get_chunk_map(fs_info, logical, len);
5722 if (IS_ERR(em))
5723 /*
5724 * We could return errors for these cases, but that could get
5725 * ugly and we'd probably end up doing the same thing anyway:
5726 * nothing else but exit. So return 1 so the callers don't try
5727 * to use other copies.
5728 */
5729 return 1;
5730
5731 map = em->map_lookup;
5732 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
5733 ret = map->num_stripes;
5734 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5735 ret = map->sub_stripes;
5736 else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5737 ret = 2;
5738 else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5739 /*
5740 * There could be two corrupted data stripes, so we need
5741 * to retry in a loop in order to rebuild the correct data.
5742 *
5743 * Fail a stripe at a time on every retry except the
5744 * stripe under reconstruction.
5745 */
5746 ret = map->num_stripes;
5747 else
5748 ret = 1;
5749 free_extent_map(em);
5750
5751 down_read(&fs_info->dev_replace.rwsem);
5752 if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5753 fs_info->dev_replace.tgtdev)
5754 ret++;
5755 up_read(&fs_info->dev_replace.rwsem);
5756
5757 return ret;
5758 }
5759
5760 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5761 u64 logical)
5762 {
5763 struct extent_map *em;
5764 struct map_lookup *map;
5765 unsigned long len = fs_info->sectorsize;
5766
5767 em = btrfs_get_chunk_map(fs_info, logical, len);
5768
5769 if (!WARN_ON(IS_ERR(em))) {
5770 map = em->map_lookup;
5771 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5772 len = map->stripe_len * nr_data_stripes(map);
5773 free_extent_map(em);
5774 }
5775 return len;
5776 }
5777
5778 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5779 {
5780 struct extent_map *em;
5781 struct map_lookup *map;
5782 int ret = 0;
5783
5784 em = btrfs_get_chunk_map(fs_info, logical, len);
5785
5786 if (!WARN_ON(IS_ERR(em))) {
5787 map = em->map_lookup;
5788 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5789 ret = 1;
5790 free_extent_map(em);
5791 }
5792 return ret;
5793 }
5794
5795 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5796 struct map_lookup *map, int first,
5797 int dev_replace_is_ongoing)
5798 {
5799 int i;
5800 int num_stripes;
5801 int preferred_mirror;
5802 int tolerance;
5803 struct btrfs_device *srcdev;
5804
5805 ASSERT((map->type &
5806 (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5807
5808 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5809 num_stripes = map->sub_stripes;
5810 else
5811 num_stripes = map->num_stripes;
5812
5813 switch (fs_info->fs_devices->read_policy) {
5814 default:
5815 /* Shouldn't happen, just warn and use pid instead of failing */
5816 btrfs_warn_rl(fs_info,
5817 "unknown read_policy type %u, reset to pid",
5818 fs_info->fs_devices->read_policy);
5819 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
5820 fallthrough;
5821 case BTRFS_READ_POLICY_PID:
5822 preferred_mirror = first + (current->pid % num_stripes);
5823 break;
5824 }
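/*
 * Example (illustrative): for a RAID1 chunk (num_stripes = 2, first = 0)
 * and a task with pid 4243, preferred_mirror = 0 + (4243 % 2) = 1, so
 * reads from different tasks tend to spread across both mirrors.
 */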
5825
5826 if (dev_replace_is_ongoing &&
5827 fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5828 BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5829 srcdev = fs_info->dev_replace.srcdev;
5830 else
5831 srcdev = NULL;
5832
5833 /*
5834 * try to avoid the drive that is the source drive for a
5835 * dev-replace procedure, only choose it if no other non-missing
5836 * mirror is available
5837 */
5838 for (tolerance = 0; tolerance < 2; tolerance++) {
5839 if (map->stripes[preferred_mirror].dev->bdev &&
5840 (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5841 return preferred_mirror;
5842 for (i = first; i < first + num_stripes; i++) {
5843 if (map->stripes[i].dev->bdev &&
5844 (tolerance || map->stripes[i].dev != srcdev))
5845 return i;
5846 }
5847 }
5848
5849 /* We couldn't find one that doesn't fail. Just return something
5850 * and the I/O error handling code will clean up eventually.
5851 */
5852 return preferred_mirror;
5853 }
5854
5855 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5856 static void sort_parity_stripes(struct btrfs_io_context *bioc, int num_stripes)
5857 {
5858 int i;
5859 int again = 1;
5860
5861 while (again) {
5862 again = 0;
5863 for (i = 0; i < num_stripes - 1; i++) {
5864 /* Swap if parity is on a smaller index */
5865 if (bioc->raid_map[i] > bioc->raid_map[i + 1]) {
5866 swap(bioc->stripes[i], bioc->stripes[i + 1]);
5867 swap(bioc->raid_map[i], bioc->raid_map[i + 1]);
5868 again = 1;
5869 }
5870 }
5871 }
5872 }
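/*
 * Example (illustrative): RAID5_P_STRIPE and RAID6_Q_STRIPE are (u64)-2
 * and (u64)-1, larger than any logical address, so a raid_map of
 * { P, 4M, 4M + 64K } sorts to { 4M, 4M + 64K, P } with the matching
 * stripes swapped in lockstep.
 */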
5873
5874 static struct btrfs_io_context *alloc_btrfs_io_context(struct btrfs_fs_info *fs_info,
5875 int total_stripes,
5876 int real_stripes)
5877 {
5878 struct btrfs_io_context *bioc = kzalloc(
5879 /* The size of btrfs_io_context */
5880 sizeof(struct btrfs_io_context) +
5881 /* Plus the variable array for the stripes */
5882 sizeof(struct btrfs_io_stripe) * (total_stripes) +
5883 /* Plus the variable array for the tgt dev */
5884 sizeof(int) * (real_stripes) +
5885 /*
5886 * Plus the raid_map, which includes both the tgt dev
5887 * and the stripes.
5888 */
5889 sizeof(u64) * (total_stripes),
5890 GFP_NOFS|__GFP_NOFAIL);
5891
5892 atomic_set(&bioc->error, 0);
5893 refcount_set(&bioc->refs, 1);
5894
5895 bioc->fs_info = fs_info;
5896 bioc->tgtdev_map = (int *)(bioc->stripes + total_stripes);
5897 bioc->raid_map = (u64 *)(bioc->tgtdev_map + real_stripes);
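/*
 * Resulting single-allocation layout (for illustration):
 *
 * [ struct btrfs_io_context ]
 * [ btrfs_io_stripe x total_stripes ] <- bioc->stripes
 * [ int x real_stripes ] <- bioc->tgtdev_map
 * [ u64 x total_stripes ] <- bioc->raid_map
 */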
5898
5899 return bioc;
5900 }
5901
5902 void btrfs_get_bioc(struct btrfs_io_context *bioc)
5903 {
5904 WARN_ON(!refcount_read(&bioc->refs));
5905 refcount_inc(&bioc->refs);
5906 }
5907
5908 void btrfs_put_bioc(struct btrfs_io_context *bioc)
5909 {
5910 if (!bioc)
5911 return;
5912 if (refcount_dec_and_test(&bioc->refs))
5913 kfree(bioc);
5914 }
5915
5916 /* Can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
5917 /*
5918 * Note that discard won't be sent to the target device of a device
5919 * replace.
5920 */
5921 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5922 u64 logical, u64 *length_ret,
5923 struct btrfs_io_context **bioc_ret)
5924 {
5925 struct extent_map *em;
5926 struct map_lookup *map;
5927 struct btrfs_io_context *bioc;
5928 u64 length = *length_ret;
5929 u64 offset;
5930 u64 stripe_nr;
5931 u64 stripe_nr_end;
5932 u64 stripe_end_offset;
5933 u64 stripe_cnt;
5934 u64 stripe_len;
5935 u64 stripe_offset;
5936 u64 num_stripes;
5937 u32 stripe_index;
5938 u32 factor = 0;
5939 u32 sub_stripes = 0;
5940 u64 stripes_per_dev = 0;
5941 u32 remaining_stripes = 0;
5942 u32 last_stripe = 0;
5943 int ret = 0;
5944 int i;
5945
5946 /* Discard always returns a bioc. */
5947 ASSERT(bioc_ret);
5948
5949 em = btrfs_get_chunk_map(fs_info, logical, length);
5950 if (IS_ERR(em))
5951 return PTR_ERR(em);
5952
5953 map = em->map_lookup;
5954 /* we don't discard raid56 yet */
5955 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5956 ret = -EOPNOTSUPP;
5957 goto out;
5958 }
5959
5960 offset = logical - em->start;
5961 length = min_t(u64, em->start + em->len - logical, length);
5962 *length_ret = length;
5963
5964 stripe_len = map->stripe_len;
5965 /*
5966 * stripe_nr counts the total number of stripes we have to stride
5967 * to get to this block
5968 */
5969 stripe_nr = div64_u64(offset, stripe_len);
5970
5971 /* stripe_offset is the offset of this block in its stripe */
5972 stripe_offset = offset - stripe_nr * stripe_len;
5973
5974 stripe_nr_end = round_up(offset + length, map->stripe_len);
5975 stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5976 stripe_cnt = stripe_nr_end - stripe_nr;
5977 stripe_end_offset = stripe_nr_end * map->stripe_len -
5978 (offset + length);
5979 /*
5980 * after this, stripe_nr is the number of stripes on this
5981 * device we have to walk to find the data, and stripe_index is
5982 * the number of our device in the stripe array
5983 */
5984 num_stripes = 1;
5985 stripe_index = 0;
5986 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5987 BTRFS_BLOCK_GROUP_RAID10)) {
5988 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5989 sub_stripes = 1;
5990 else
5991 sub_stripes = map->sub_stripes;
5992
5993 factor = map->num_stripes / sub_stripes;
5994 num_stripes = min_t(u64, map->num_stripes,
5995 sub_stripes * stripe_cnt);
5996 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5997 stripe_index *= sub_stripes;
5998 stripes_per_dev = div_u64_rem(stripe_cnt, factor,
5999 &remaining_stripes);
6000 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
6001 last_stripe *= sub_stripes;
6002 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
6003 BTRFS_BLOCK_GROUP_DUP)) {
6004 num_stripes = map->num_stripes;
6005 } else {
6006 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6007 &stripe_index);
6008 }
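/*
 * Worked example (illustrative): stripe_len = 64K, offset = 96K and
 * length = 192K give stripe_nr = 1, stripe_offset = 32K,
 * stripe_nr_end = round_up(288K, 64K) / 64K = 5, stripe_cnt = 4 and
 * stripe_end_offset = 320K - 288K = 32K.
 */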
6009
6010 bioc = alloc_btrfs_io_context(fs_info, num_stripes, 0);
6011 if (!bioc) {
6012 ret = -ENOMEM;
6013 goto out;
6014 }
6015
6016 for (i = 0; i < num_stripes; i++) {
6017 bioc->stripes[i].physical =
6018 map->stripes[stripe_index].physical +
6019 stripe_offset + stripe_nr * map->stripe_len;
6020 bioc->stripes[i].dev = map->stripes[stripe_index].dev;
6021
6022 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
6023 BTRFS_BLOCK_GROUP_RAID10)) {
6024 bioc->stripes[i].length = stripes_per_dev *
6025 map->stripe_len;
6026
6027 if (i / sub_stripes < remaining_stripes)
6028 bioc->stripes[i].length += map->stripe_len;
6029
6030 /*
6031 * Special for the first stripe and
6032 * the last stripe:
6033 *
6034 * |-------|...|-------|
6035 * |----------|
6036 * off end_off
6037 */
6038 if (i < sub_stripes)
6039 bioc->stripes[i].length -= stripe_offset;
6040
6041 if (stripe_index >= last_stripe &&
6042 stripe_index <= (last_stripe +
6043 sub_stripes - 1))
6044 bioc->stripes[i].length -= stripe_end_offset;
6045
6046 if (i == sub_stripes - 1)
6047 stripe_offset = 0;
6048 } else {
6049 bioc->stripes[i].length = length;
6050 }
6051
6052 stripe_index++;
6053 if (stripe_index == map->num_stripes) {
6054 stripe_index = 0;
6055 stripe_nr++;
6056 }
6057 }
6058
6059 *bioc_ret = bioc;
6060 bioc->map_type = map->type;
6061 bioc->num_stripes = num_stripes;
6062 out:
6063 free_extent_map(em);
6064 return ret;
6065 }
6066
6067 /*
6068 * In dev-replace case, for repair case (that's the only case where the mirror
6069 * is selected explicitly when calling btrfs_map_block), blocks left of the
6070 * left cursor can also be read from the target drive.
6071 *
6072 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
6073 * array of stripes.
6074 * For READ, it also needs to be supported using the same mirror number.
6075 *
6076 * If the requested block is not left of the left cursor, EIO is returned. This
6077 * can happen because btrfs_num_copies() returns one more in the dev-replace
6078 * case.
6079 */
6080 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
6081 u64 logical, u64 length,
6082 u64 srcdev_devid, int *mirror_num,
6083 u64 *physical)
6084 {
6085 struct btrfs_io_context *bioc = NULL;
6086 int num_stripes;
6087 int index_srcdev = 0;
6088 int found = 0;
6089 u64 physical_of_found = 0;
6090 int i;
6091 int ret = 0;
6092
6093 ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
6094 logical, &length, &bioc, 0, 0);
6095 if (ret) {
6096 ASSERT(bioc == NULL);
6097 return ret;
6098 }
6099
6100 num_stripes = bioc->num_stripes;
6101 if (*mirror_num > num_stripes) {
6102 /*
6103 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
6104 * that means that the requested area is not left of the left
6105 * cursor
6106 */
6107 btrfs_put_bioc(bioc);
6108 return -EIO;
6109 }
6110
6111 /*
6112 * Process the rest of the function using the mirror_num of the source
6113 * drive. Therefore look it up first. At the end, patch the device
6114 * pointer to that of the target drive.
6115 */
6116 for (i = 0; i < num_stripes; i++) {
6117 if (bioc->stripes[i].dev->devid != srcdev_devid)
6118 continue;
6119
6120 /*
6121 * In case of DUP, in order to keep it simple, only add the
6122 * mirror with the lowest physical address
6123 */
6124 if (found &&
6125 physical_of_found <= bioc->stripes[i].physical)
6126 continue;
6127
6128 index_srcdev = i;
6129 found = 1;
6130 physical_of_found = bioc->stripes[i].physical;
6131 }
6132
6133 btrfs_put_bioc(bioc);
6134
6135 ASSERT(found);
6136 if (!found)
6137 return -EIO;
6138
6139 *mirror_num = index_srcdev + 1;
6140 *physical = physical_of_found;
6141 return ret;
6142 }
6143
6144 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
6145 {
6146 struct btrfs_block_group *cache;
6147 bool ret;
6148
6149 /* Non-zoned filesystems do not use the "to_copy" flag */
6150 if (!btrfs_is_zoned(fs_info))
6151 return false;
6152
6153 cache = btrfs_lookup_block_group(fs_info, logical);
6154
6155 spin_lock(&cache->lock);
6156 ret = cache->to_copy;
6157 spin_unlock(&cache->lock);
6158
6159 btrfs_put_block_group(cache);
6160 return ret;
6161 }
6162
6163 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
6164 struct btrfs_io_context **bioc_ret,
6165 struct btrfs_dev_replace *dev_replace,
6166 u64 logical,
6167 int *num_stripes_ret, int *max_errors_ret)
6168 {
6169 struct btrfs_io_context *bioc = *bioc_ret;
6170 u64 srcdev_devid = dev_replace->srcdev->devid;
6171 int tgtdev_indexes = 0;
6172 int num_stripes = *num_stripes_ret;
6173 int max_errors = *max_errors_ret;
6174 int i;
6175
6176 if (op == BTRFS_MAP_WRITE) {
6177 int index_where_to_add;
6178
6179 /*
6180 * A block group which has "to_copy" set will eventually be
6181 * copied by the dev-replace process. We can avoid cloning the IO here.
6182 */
6183 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
6184 return;
6185
6186 /*
6187 * duplicate the write operations while the dev replace
6188 * procedure is running. Since the copying of the old disk to
6189 * the new disk takes place at run time while the filesystem is
6190 * mounted writable, the regular write operations to the old
6191 * disk have to be duplicated to go to the new disk as well.
6192 *
6193 * Note that device->missing is handled by the caller, and that
6194 * the write to the old disk is already set up in the stripes
6195 * array.
6196 */
6197 index_where_to_add = num_stripes;
6198 for (i = 0; i < num_stripes; i++) {
6199 if (bioc->stripes[i].dev->devid == srcdev_devid) {
6200 /* write to new disk, too */
6201 struct btrfs_io_stripe *new =
6202 bioc->stripes + index_where_to_add;
6203 struct btrfs_io_stripe *old =
6204 bioc->stripes + i;
6205
6206 new->physical = old->physical;
6207 new->length = old->length;
6208 new->dev = dev_replace->tgtdev;
6209 bioc->tgtdev_map[i] = index_where_to_add;
6210 index_where_to_add++;
6211 max_errors++;
6212 tgtdev_indexes++;
6213 }
6214 }
6215 num_stripes = index_where_to_add;
6216 } else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
6217 int index_srcdev = 0;
6218 int found = 0;
6219 u64 physical_of_found = 0;
6220
6221 /*
6222 * During the dev-replace procedure, the target drive can also
6223 * be used to read data in case it is needed to repair a corrupt
6224 * block elsewhere. This is possible if the requested area is
6225 * left of the left cursor. In this area, the target drive is a
6226 * full copy of the source drive.
6227 */
6228 for (i = 0; i < num_stripes; i++) {
6229 if (bioc->stripes[i].dev->devid == srcdev_devid) {
6230 /*
6231 * In case of DUP, in order to keep it simple,
6232 * only add the mirror with the lowest physical
6233 * address
6234 */
6235 if (found &&
6236 physical_of_found <= bioc->stripes[i].physical)
6237 continue;
6238 index_srcdev = i;
6239 found = 1;
6240 physical_of_found = bioc->stripes[i].physical;
6241 }
6242 }
6243 if (found) {
6244 struct btrfs_io_stripe *tgtdev_stripe =
6245 bioc->stripes + num_stripes;
6246
6247 tgtdev_stripe->physical = physical_of_found;
6248 tgtdev_stripe->length =
6249 bioc->stripes[index_srcdev].length;
6250 tgtdev_stripe->dev = dev_replace->tgtdev;
6251 bioc->tgtdev_map[index_srcdev] = num_stripes;
6252
6253 tgtdev_indexes++;
6254 num_stripes++;
6255 }
6256 }
6257
6258 *num_stripes_ret = num_stripes;
6259 *max_errors_ret = max_errors;
6260 bioc->num_tgtdevs = tgtdev_indexes;
6261 *bioc_ret = bioc;
6262 }
6263
6264 static bool need_full_stripe(enum btrfs_map_op op)
6265 {
6266 return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
6267 }
6268
6269 /*
6270 * Calculate the geometry of a particular (address, len) tuple. This
6271 * information is used to calculate how big a particular bio can get before it
6272 * straddles a stripe.
6273 *
6274 * @fs_info: the filesystem
6275 * @em: mapping containing the logical extent
6276 * @op: type of operation - write or read
6277 * @logical: address that we want to figure out the geometry of
6278 * @io_geom: pointer used to return values
6279 *
6280 * Returns < 0 in case a chunk for the given logical address cannot be found,
6281 * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
6282 */
6283 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
6284 enum btrfs_map_op op, u64 logical,
6285 struct btrfs_io_geometry *io_geom)
6286 {
6287 struct map_lookup *map;
6288 u64 len;
6289 u64 offset;
6290 u64 stripe_offset;
6291 u64 stripe_nr;
6292 u32 stripe_len;
6293 u64 raid56_full_stripe_start = (u64)-1;
6294 int data_stripes;
6295
6296 ASSERT(op != BTRFS_MAP_DISCARD);
6297
6298 map = em->map_lookup;
6299 /* Offset of this logical address in the chunk */
6300 offset = logical - em->start;
6301 /* Len of a stripe in a chunk */
6302 stripe_len = map->stripe_len;
6303 /*
6304 * stripe_nr is the stripe where this block falls in;
6305 * stripe_offset is the offset of this block in its stripe.
6306 */
6307 stripe_nr = div64_u64_rem(offset, stripe_len, &stripe_offset);
6308 ASSERT(stripe_offset < U32_MAX);
6309
6310 data_stripes = nr_data_stripes(map);
6311
6312 /* Only stripe based profiles needs to check against stripe length. */
6313 if (map->type & BTRFS_BLOCK_GROUP_STRIPE_MASK) {
6314 u64 max_len = stripe_len - stripe_offset;
6315
6316 /*
6317 * In case of raid56, we need to know the stripe aligned start
6318 */
6319 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6320 unsigned long full_stripe_len = stripe_len * data_stripes;
6321 raid56_full_stripe_start = offset;
6322
6323 /*
6324 * Allow a write of a full stripe, but make sure we
6325 * don't allow straddling of stripes
6326 */
6327 raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
6328 full_stripe_len);
6329 raid56_full_stripe_start *= full_stripe_len;
6330
6331 /*
6332 * For writes to RAID[56], allow a full stripeset across
6333 * all disks. For other RAID types and for RAID[56]
6334 * reads, just allow a single stripe (on a single disk).
6335 */
6336 if (op == BTRFS_MAP_WRITE) {
6337 max_len = stripe_len * data_stripes -
6338 (offset - raid56_full_stripe_start);
6339 }
6340 }
6341 len = min_t(u64, em->len - offset, max_len);
6342 } else {
6343 len = em->len - offset;
6344 }
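/*
 * Worked example (illustrative): RAID5 over 3 devices has
 * data_stripes = 2 and, with stripe_len = 64K, a 128K full stripe.
 * For offset = 136K: stripe_nr = 2, stripe_offset = 8K and
 * raid56_full_stripe_start = 128K, so a read is capped at
 * 64K - 8K = 56K while a write may span 128K - 8K = 120K.
 */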
6345
6346 io_geom->len = len;
6347 io_geom->offset = offset;
6348 io_geom->stripe_len = stripe_len;
6349 io_geom->stripe_nr = stripe_nr;
6350 io_geom->stripe_offset = stripe_offset;
6351 io_geom->raid56_stripe_offset = raid56_full_stripe_start;
6352
6353 return 0;
6354 }
6355
6356 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
6357 enum btrfs_map_op op,
6358 u64 logical, u64 *length,
6359 struct btrfs_io_context **bioc_ret,
6360 int mirror_num, int need_raid_map)
6361 {
6362 struct extent_map *em;
6363 struct map_lookup *map;
6364 u64 stripe_offset;
6365 u64 stripe_nr;
6366 u64 stripe_len;
6367 u32 stripe_index;
6368 int data_stripes;
6369 int i;
6370 int ret = 0;
6371 int num_stripes;
6372 int max_errors = 0;
6373 int tgtdev_indexes = 0;
6374 struct btrfs_io_context *bioc = NULL;
6375 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6376 int dev_replace_is_ongoing = 0;
6377 int num_alloc_stripes;
6378 int patch_the_first_stripe_for_dev_replace = 0;
6379 u64 physical_to_patch_in_first_stripe = 0;
6380 u64 raid56_full_stripe_start = (u64)-1;
6381 struct btrfs_io_geometry geom;
6382
6383 ASSERT(bioc_ret);
6384 ASSERT(op != BTRFS_MAP_DISCARD);
6385
6386 em = btrfs_get_chunk_map(fs_info, logical, *length);
6387 ASSERT(!IS_ERR(em));
6388
6389 ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom);
6390 if (ret < 0)
6391 return ret;
6392
6393 map = em->map_lookup;
6394
6395 *length = geom.len;
6396 stripe_len = geom.stripe_len;
6397 stripe_nr = geom.stripe_nr;
6398 stripe_offset = geom.stripe_offset;
6399 raid56_full_stripe_start = geom.raid56_stripe_offset;
6400 data_stripes = nr_data_stripes(map);
6401
6402 down_read(&dev_replace->rwsem);
6403 dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6404 /*
6405 * Hold the semaphore for read during the whole operation, write is
6406 * requested at commit time but must wait.
6407 */
6408 if (!dev_replace_is_ongoing)
6409 up_read(&dev_replace->rwsem);
6410
6411 if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6412 !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6413 ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6414 dev_replace->srcdev->devid,
6415 &mirror_num,
6416 &physical_to_patch_in_first_stripe);
6417 if (ret)
6418 goto out;
6419 else
6420 patch_the_first_stripe_for_dev_replace = 1;
6421 } else if (mirror_num > map->num_stripes) {
6422 mirror_num = 0;
6423 }
6424
6425 num_stripes = 1;
6426 stripe_index = 0;
6427 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6428 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6429 &stripe_index);
6430 if (!need_full_stripe(op))
6431 mirror_num = 1;
6432 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6433 if (need_full_stripe(op))
6434 num_stripes = map->num_stripes;
6435 else if (mirror_num)
6436 stripe_index = mirror_num - 1;
6437 else {
6438 stripe_index = find_live_mirror(fs_info, map, 0,
6439 dev_replace_is_ongoing);
6440 mirror_num = stripe_index + 1;
6441 }
6442
6443 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6444 if (need_full_stripe(op)) {
6445 num_stripes = map->num_stripes;
6446 } else if (mirror_num) {
6447 stripe_index = mirror_num - 1;
6448 } else {
6449 mirror_num = 1;
6450 }
6451
6452 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6453 u32 factor = map->num_stripes / map->sub_stripes;
6454
6455 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6456 stripe_index *= map->sub_stripes;
6457
6458 if (need_full_stripe(op))
6459 num_stripes = map->sub_stripes;
6460 else if (mirror_num)
6461 stripe_index += mirror_num - 1;
6462 else {
6463 int old_stripe_index = stripe_index;
6464 stripe_index = find_live_mirror(fs_info, map,
6465 stripe_index,
6466 dev_replace_is_ongoing);
6467 mirror_num = stripe_index - old_stripe_index + 1;
6468 }
6469
6470 } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6471 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6472 /* push stripe_nr back to the start of the full stripe */
6473 stripe_nr = div64_u64(raid56_full_stripe_start,
6474 stripe_len * data_stripes);
6475
6476 /* RAID[56] write or recovery. Return all stripes */
6477 num_stripes = map->num_stripes;
6478 max_errors = nr_parity_stripes(map);
6479
6480 *length = map->stripe_len;
6481 stripe_index = 0;
6482 stripe_offset = 0;
6483 } else {
6484 /*
6485 * Mirror #0 or #1 means the original data block.
6486 * Mirror #2 is RAID5 parity block.
6487 * Mirror #3 is RAID6 Q block.
6488 */
6489 stripe_nr = div_u64_rem(stripe_nr,
6490 data_stripes, &stripe_index);
6491 if (mirror_num > 1)
6492 stripe_index = data_stripes + mirror_num - 2;
6493
6494 /* We distribute the parity blocks across stripes */
6495 div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6496 &stripe_index);
6497 if (!need_full_stripe(op) && mirror_num <= 1)
6498 mirror_num = 1;
6499 }
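/*
 * Example (illustrative): a RAID6 chunk with num_stripes = 5 has
 * data_stripes = 3, so mirror_num = 3 selects
 * stripe_index = 3 + 3 - 2 = 4, the Q stripe, before the rotation
 * by stripe_nr is applied.
 */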
6500 } else {
6501 /*
6502 * after this, stripe_nr is the number of stripes on this
6503 * device we have to walk to find the data, and stripe_index is
6504 * the number of our device in the stripe array
6505 */
6506 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6507 &stripe_index);
6508 mirror_num = stripe_index + 1;
6509 }
6510 if (stripe_index >= map->num_stripes) {
6511 btrfs_crit(fs_info,
6512 "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6513 stripe_index, map->num_stripes);
6514 ret = -EINVAL;
6515 goto out;
6516 }
6517
6518 num_alloc_stripes = num_stripes;
6519 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6520 if (op == BTRFS_MAP_WRITE)
6521 num_alloc_stripes <<= 1;
6522 if (op == BTRFS_MAP_GET_READ_MIRRORS)
6523 num_alloc_stripes++;
6524 tgtdev_indexes = num_stripes;
6525 }
6526
6527 bioc = alloc_btrfs_io_context(fs_info, num_alloc_stripes, tgtdev_indexes);
6528 if (!bioc) {
6529 ret = -ENOMEM;
6530 goto out;
6531 }
6532
6533 for (i = 0; i < num_stripes; i++) {
6534 bioc->stripes[i].physical = map->stripes[stripe_index].physical +
6535 stripe_offset + stripe_nr * map->stripe_len;
6536 bioc->stripes[i].dev = map->stripes[stripe_index].dev;
6537 stripe_index++;
6538 }
6539
6540 /* Build raid_map */
6541 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6542 (need_full_stripe(op) || mirror_num > 1)) {
6543 u64 tmp;
6544 unsigned rot;
6545
6546 /* Work out the disk rotation on this stripe-set */
6547 div_u64_rem(stripe_nr, num_stripes, &rot);
6548
6549 /* Fill in the logical address of each stripe */
6550 tmp = stripe_nr * data_stripes;
6551 for (i = 0; i < data_stripes; i++)
6552 bioc->raid_map[(i + rot) % num_stripes] =
6553 em->start + (tmp + i) * map->stripe_len;
6554
6555 bioc->raid_map[(i + rot) % map->num_stripes] = RAID5_P_STRIPE;
6556 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6557 bioc->raid_map[(i + rot + 1) % num_stripes] =
6558 RAID6_Q_STRIPE;
6559
6560 sort_parity_stripes(bioc, num_stripes);
6561 }
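/*
 * Example (illustrative): RAID5 with num_stripes = 3, data_stripes = 2
 * and stripe_nr = 1 gives rot = 1, so raid_map[1] and raid_map[2] get
 * the two data stripe addresses and raid_map[0] becomes RAID5_P_STRIPE;
 * sort_parity_stripes() then moves the parity stripe back to the end.
 */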
6562
6563 if (need_full_stripe(op))
6564 max_errors = btrfs_chunk_max_errors(map);
6565
6566 if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6567 need_full_stripe(op)) {
6568 handle_ops_on_dev_replace(op, &bioc, dev_replace, logical,
6569 &num_stripes, &max_errors);
6570 }
6571
6572 *bioc_ret = bioc;
6573 bioc->map_type = map->type;
6574 bioc->num_stripes = num_stripes;
6575 bioc->max_errors = max_errors;
6576 bioc->mirror_num = mirror_num;
6577
6578 /*
6579 * this is the case that REQ_READ && dev_replace_is_ongoing &&
6580 * mirror_num == num_stripes + 1 && dev_replace target drive is
6581 * available as a mirror
6582 */
6583 if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6584 WARN_ON(num_stripes > 1);
6585 bioc->stripes[0].dev = dev_replace->tgtdev;
6586 bioc->stripes[0].physical = physical_to_patch_in_first_stripe;
6587 bioc->mirror_num = map->num_stripes + 1;
6588 }
6589 out:
6590 if (dev_replace_is_ongoing) {
6591 lockdep_assert_held(&dev_replace->rwsem);
6592 /* Unlock and let waiting writers proceed */
6593 up_read(&dev_replace->rwsem);
6594 }
6595 free_extent_map(em);
6596 return ret;
6597 }
6598
6599 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6600 u64 logical, u64 *length,
6601 struct btrfs_io_context **bioc_ret, int mirror_num)
6602 {
6603 if (op == BTRFS_MAP_DISCARD)
6604 return __btrfs_map_block_for_discard(fs_info, logical,
6605 length, bioc_ret);
6606
6607 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret,
6608 mirror_num, 0);
6609 }
6610
6611 /* For Scrub/replace */
6612 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6613 u64 logical, u64 *length,
6614 struct btrfs_io_context **bioc_ret)
6615 {
6616 return __btrfs_map_block(fs_info, op, logical, length, bioc_ret, 0, 1);
6617 }
6618
6619 static inline void btrfs_end_bioc(struct btrfs_io_context *bioc, struct bio *bio)
6620 {
6621 bio->bi_private = bioc->private;
6622 bio->bi_end_io = bioc->end_io;
6623 bio_endio(bio);
6624
6625 btrfs_put_bioc(bioc);
6626 }
6627
6628 static void btrfs_end_bio(struct bio *bio)
6629 {
6630 struct btrfs_io_context *bioc = bio->bi_private;
6631 int is_orig_bio = 0;
6632
6633 if (bio->bi_status) {
6634 atomic_inc(&bioc->error);
6635 if (bio->bi_status == BLK_STS_IOERR ||
6636 bio->bi_status == BLK_STS_TARGET) {
6637 struct btrfs_device *dev = btrfs_bio(bio)->device;
6638
6639 ASSERT(dev->bdev);
6640 if (btrfs_op(bio) == BTRFS_MAP_WRITE)
6641 btrfs_dev_stat_inc_and_print(dev,
6642 BTRFS_DEV_STAT_WRITE_ERRS);
6643 else if (!(bio->bi_opf & REQ_RAHEAD))
6644 btrfs_dev_stat_inc_and_print(dev,
6645 BTRFS_DEV_STAT_READ_ERRS);
6646 if (bio->bi_opf & REQ_PREFLUSH)
6647 btrfs_dev_stat_inc_and_print(dev,
6648 BTRFS_DEV_STAT_FLUSH_ERRS);
6649 }
6650 }
6651
6652 if (bio == bioc->orig_bio)
6653 is_orig_bio = 1;
6654
6655 btrfs_bio_counter_dec(bioc->fs_info);
6656
6657 if (atomic_dec_and_test(&bioc->stripes_pending)) {
6658 if (!is_orig_bio) {
6659 bio_put(bio);
6660 bio = bioc->orig_bio;
6661 }
6662
6663 btrfs_bio(bio)->mirror_num = bioc->mirror_num;
6664 /* only send an error to the higher layers if it is
6665 * beyond the tolerance of the btrfs bio
6666 */
6667 if (atomic_read(&bioc->error) > bioc->max_errors) {
6668 bio->bi_status = BLK_STS_IOERR;
6669 } else {
6670 /*
6671 * this bio is actually up to date, we didn't
6672 * go over the max number of errors
6673 */
6674 bio->bi_status = BLK_STS_OK;
6675 }
6676
6677 btrfs_end_bioc(bioc, bio);
6678 } else if (!is_orig_bio) {
6679 bio_put(bio);
6680 }
6681 }
6682
6683 static void submit_stripe_bio(struct btrfs_io_context *bioc, struct bio *bio,
6684 u64 physical, struct btrfs_device *dev)
6685 {
6686 struct btrfs_fs_info *fs_info = bioc->fs_info;
6687
6688 bio->bi_private = bioc;
6689 btrfs_bio(bio)->device = dev;
6690 bio->bi_end_io = btrfs_end_bio;
6691 bio->bi_iter.bi_sector = physical >> 9;
6692 /*
6693 * For zone append writes, bi_sector must point to the beginning of
6694 * the zone.
6695 */
6696 if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
6697 if (btrfs_dev_is_sequential(dev, physical)) {
6698 u64 zone_start = round_down(physical, fs_info->zone_size);
6699
6700 bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
6701 } else {
6702 bio->bi_opf &= ~REQ_OP_ZONE_APPEND;
6703 bio->bi_opf |= REQ_OP_WRITE;
6704 }
6705 }
6706 btrfs_debug_in_rcu(fs_info,
6707 "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6708 bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
6709 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
6710 dev->devid, bio->bi_iter.bi_size);
6711
6712 btrfs_bio_counter_inc_noblocked(fs_info);
6713
6714 btrfsic_check_bio(bio);
6715 submit_bio(bio);
6716 }
6717
6718 static void bioc_error(struct btrfs_io_context *bioc, struct bio *bio, u64 logical)
6719 {
6720 atomic_inc(&bioc->error);
6721 if (atomic_dec_and_test(&bioc->stripes_pending)) {
6722 /* Should be the original bio. */
6723 WARN_ON(bio != bioc->orig_bio);
6724
6725 btrfs_bio(bio)->mirror_num = bioc->mirror_num;
6726 bio->bi_iter.bi_sector = logical >> 9;
6727 if (atomic_read(&bioc->error) > bioc->max_errors)
6728 bio->bi_status = BLK_STS_IOERR;
6729 else
6730 bio->bi_status = BLK_STS_OK;
6731 btrfs_end_bioc(bioc, bio);
6732 }
6733 }
6734
6735 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6736 int mirror_num)
6737 {
6738 struct btrfs_device *dev;
6739 struct bio *first_bio = bio;
6740 u64 logical = bio->bi_iter.bi_sector << 9;
6741 u64 length = 0;
6742 u64 map_length;
6743 int ret;
6744 int dev_nr;
6745 int total_devs;
6746 struct btrfs_io_context *bioc = NULL;
6747
6748 length = bio->bi_iter.bi_size;
6749 map_length = length;
6750
6751 btrfs_bio_counter_inc_blocked(fs_info);
6752 ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6753 &map_length, &bioc, mirror_num, 1);
6754 if (ret) {
6755 btrfs_bio_counter_dec(fs_info);
6756 return errno_to_blk_status(ret);
6757 }
6758
6759 total_devs = bioc->num_stripes;
6760 bioc->orig_bio = first_bio;
6761 bioc->private = first_bio->bi_private;
6762 bioc->end_io = first_bio->bi_end_io;
6763 atomic_set(&bioc->stripes_pending, bioc->num_stripes);
6764
6765 if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6766 ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) {
6767 /* In this case, map_length has been set to the length of
6768 * a single stripe, not the whole write. */
6769 if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
6770 ret = raid56_parity_write(bio, bioc, map_length);
6771 } else {
6772 ret = raid56_parity_recover(bio, bioc, map_length,
6773 mirror_num, 1);
6774 }
6775
6776 btrfs_bio_counter_dec(fs_info);
6777 return errno_to_blk_status(ret);
6778 }
6779
6780 if (map_length < length) {
6781 btrfs_crit(fs_info,
6782 "mapping failed logical %llu bio len %llu len %llu",
6783 logical, length, map_length);
6784 BUG();
6785 }
6786
6787 for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6788 dev = bioc->stripes[dev_nr].dev;
6789 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6790 &dev->dev_state) ||
6791 (btrfs_op(first_bio) == BTRFS_MAP_WRITE &&
6792 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6793 bioc_error(bioc, first_bio, logical);
6794 continue;
6795 }
6796
6797 if (dev_nr < total_devs - 1) {
6798 bio = btrfs_bio_clone(dev->bdev, first_bio);
6799 } else {
6800 bio = first_bio;
6801 bio_set_dev(bio, dev->bdev);
6802 }
6803
6804 submit_stripe_bio(bioc, bio, bioc->stripes[dev_nr].physical, dev);
6805 }
6806 btrfs_bio_counter_dec(fs_info);
6807 return BLK_STS_OK;
6808 }
6809
6810 static bool dev_args_match_fs_devices(const struct btrfs_dev_lookup_args *args,
6811 const struct btrfs_fs_devices *fs_devices)
6812 {
6813 if (args->fsid == NULL)
6814 return true;
6815 if (memcmp(fs_devices->metadata_uuid, args->fsid, BTRFS_FSID_SIZE) == 0)
6816 return true;
6817 return false;
6818 }
6819
6820 static bool dev_args_match_device(const struct btrfs_dev_lookup_args *args,
6821 const struct btrfs_device *device)
6822 {
6823 ASSERT((args->devid != (u64)-1) || args->missing);
6824
6825 if ((args->devid != (u64)-1) && device->devid != args->devid)
6826 return false;
6827 if (args->uuid && memcmp(device->uuid, args->uuid, BTRFS_UUID_SIZE) != 0)
6828 return false;
6829 if (!args->missing)
6830 return true;
6831 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state) &&
6832 !device->bdev)
6833 return true;
6834 return false;
6835 }
6836
6837 /*
6838 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6839 * return NULL.
6840 *
6841 * If devid and uuid are both specified, the match must be exact, otherwise
6842 * only devid is used.
6843 */
6844 struct btrfs_device *btrfs_find_device(const struct btrfs_fs_devices *fs_devices,
6845 const struct btrfs_dev_lookup_args *args)
6846 {
6847 struct btrfs_device *device;
6848 struct btrfs_fs_devices *seed_devs;
6849
6850 if (dev_args_match_fs_devices(args, fs_devices)) {
6851 list_for_each_entry(device, &fs_devices->devices, dev_list) {
6852 if (dev_args_match_device(args, device))
6853 return device;
6854 }
6855 }
6856
6857 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
6858 if (!dev_args_match_fs_devices(args, seed_devs))
6859 continue;
6860 list_for_each_entry(device, &seed_devs->devices, dev_list) {
6861 if (dev_args_match_device(args, device))
6862 return device;
6863 }
6864 }
6865
6866 return NULL;
6867 }
6868
6869 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6870 u64 devid, u8 *dev_uuid)
6871 {
6872 struct btrfs_device *device;
6873 unsigned int nofs_flag;
6874
6875 /*
6876 * We call this under the chunk_mutex, so we want to use NOFS for this
6877 * allocation; however, we don't want to change btrfs_alloc_device() to
6878 * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6879 * places.
6880 */
6881 nofs_flag = memalloc_nofs_save();
6882 device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6883 memalloc_nofs_restore(nofs_flag);
6884 if (IS_ERR(device))
6885 return device;
6886
6887 list_add(&device->dev_list, &fs_devices->devices);
6888 device->fs_devices = fs_devices;
6889 fs_devices->num_devices++;
6890
6891 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6892 fs_devices->missing_devices++;
6893
6894 return device;
6895 }
6896
6897 /**
6898 * btrfs_alloc_device - allocate struct btrfs_device
6899 * @fs_info: used only for generating a new devid, can be NULL if
6900 * devid is provided (i.e. @devid != NULL).
6901 * @devid: a pointer to devid for this device. If NULL a new devid
6902 * is generated.
6903 * @uuid: a pointer to UUID for this device. If NULL a new UUID
6904 * is generated.
6905 *
6906 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6907 * on error. Returned struct is not linked onto any lists and must be
6908 * destroyed with btrfs_free_device.
6909 */
6910 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6911 const u64 *devid,
6912 const u8 *uuid)
6913 {
6914 struct btrfs_device *dev;
6915 u64 tmp;
6916
6917 if (WARN_ON(!devid && !fs_info))
6918 return ERR_PTR(-EINVAL);
6919
6920 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
6921 if (!dev)
6922 return ERR_PTR(-ENOMEM);
6923
6924 INIT_LIST_HEAD(&dev->dev_list);
6925 INIT_LIST_HEAD(&dev->dev_alloc_list);
6926 INIT_LIST_HEAD(&dev->post_commit_list);
6927
6928 atomic_set(&dev->dev_stats_ccnt, 0);
6929 btrfs_device_data_ordered_init(dev);
6930 extent_io_tree_init(fs_info, &dev->alloc_state,
6931 IO_TREE_DEVICE_ALLOC_STATE, NULL);
6932
6933 if (devid)
6934 tmp = *devid;
6935 else {
6936 int ret;
6937
6938 ret = find_next_devid(fs_info, &tmp);
6939 if (ret) {
6940 btrfs_free_device(dev);
6941 return ERR_PTR(ret);
6942 }
6943 }
6944 dev->devid = tmp;
6945
6946 if (uuid)
6947 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6948 else
6949 generate_random_uuid(dev->uuid);
6950
6951 return dev;
6952 }
6953
6954 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6955 u64 devid, u8 *uuid, bool error)
6956 {
6957 if (error)
6958 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6959 devid, uuid);
6960 else
6961 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6962 devid, uuid);
6963 }
6964
6965 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
6966 {
6967 const int data_stripes = calc_data_stripes(type, num_stripes);
6968
6969 return div_u64(chunk_len, data_stripes);
6970 }
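/*
 * E.g. (illustrative) a 3G RAID5 chunk with num_stripes = 4 has
 * 3 data stripes, so the per-device stripe length is 1G.
 */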
6971
6972 #if BITS_PER_LONG == 32
6973 /*
6974 * Due to page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
6975 * can't be accessed on 32bit systems.
6976 *
6977 * This function does a mount-time check to reject the fs if it already
6978 * has a metadata chunk beyond that limit.
6979 */
6980 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
6981 u64 logical, u64 length, u64 type)
6982 {
6983 if (!(type & BTRFS_BLOCK_GROUP_METADATA))
6984 return 0;
6985
6986 if (logical + length < MAX_LFS_FILESIZE)
6987 return 0;
6988
6989 btrfs_err_32bit_limit(fs_info);
6990 return -EOVERFLOW;
6991 }
6992
6993 /*
6994 * This is to give an early warning for any metadata chunk reaching
6995 * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
6996 * Although we can still access the metadata, it's not going to be possible
6997 * once the limit is reached.
6998 */
6999 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
7000 u64 logical, u64 length, u64 type)
7001 {
7002 if (!(type & BTRFS_BLOCK_GROUP_METADATA))
7003 return;
7004
7005 if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD)
7006 return;
7007
7008 btrfs_warn_32bit_limit(fs_info);
7009 }
7010 #endif
7011
7012 static struct btrfs_device *handle_missing_device(struct btrfs_fs_info *fs_info,
7013 u64 devid, u8 *uuid)
7014 {
7015 struct btrfs_device *dev;
7016
7017 if (!btrfs_test_opt(fs_info, DEGRADED)) {
7018 btrfs_report_missing_device(fs_info, devid, uuid, true);
7019 return ERR_PTR(-ENOENT);
7020 }
7021
7022 dev = add_missing_dev(fs_info->fs_devices, devid, uuid);
7023 if (IS_ERR(dev)) {
7024 btrfs_err(fs_info, "failed to init missing device %llu: %ld",
7025 devid, PTR_ERR(dev));
7026 return dev;
7027 }
7028 btrfs_report_missing_device(fs_info, devid, uuid, false);
7029
7030 return dev;
7031 }
7032
7033 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
7034 struct btrfs_chunk *chunk)
7035 {
7036 BTRFS_DEV_LOOKUP_ARGS(args);
7037 struct btrfs_fs_info *fs_info = leaf->fs_info;
7038 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7039 struct map_lookup *map;
7040 struct extent_map *em;
7041 u64 logical;
7042 u64 length;
7043 u64 devid;
7044 u64 type;
7045 u8 uuid[BTRFS_UUID_SIZE];
7046 int num_stripes;
7047 int ret;
7048 int i;
7049
7050 logical = key->offset;
7051 length = btrfs_chunk_length(leaf, chunk);
7052 type = btrfs_chunk_type(leaf, chunk);
7053 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
7054
7055 #if BITS_PER_LONG == 32
7056 ret = check_32bit_meta_chunk(fs_info, logical, length, type);
7057 if (ret < 0)
7058 return ret;
7059 warn_32bit_meta_chunk(fs_info, logical, length, type);
7060 #endif
7061
7062 /*
7063 * Only need to verify chunk item if we're reading from sys chunk array,
7064 * as chunk item in tree block is already verified by tree-checker.
7065 */
7066 if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
7067 ret = btrfs_check_chunk_valid(leaf, chunk, logical);
7068 if (ret)
7069 return ret;
7070 }
7071
7072 read_lock(&map_tree->lock);
7073 em = lookup_extent_mapping(map_tree, logical, 1);
7074 read_unlock(&map_tree->lock);
7075
7076 /* already mapped? */
7077 if (em && em->start <= logical && em->start + em->len > logical) {
7078 free_extent_map(em);
7079 return 0;
7080 } else if (em) {
7081 free_extent_map(em);
7082 }
7083
7084 em = alloc_extent_map();
7085 if (!em)
7086 return -ENOMEM;
7087 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
7088 if (!map) {
7089 free_extent_map(em);
7090 return -ENOMEM;
7091 }
7092
7093 set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
7094 em->map_lookup = map;
7095 em->start = logical;
7096 em->len = length;
7097 em->orig_start = 0;
7098 em->block_start = 0;
7099 em->block_len = em->len;
7100
7101 map->num_stripes = num_stripes;
7102 map->io_width = btrfs_chunk_io_width(leaf, chunk);
7103 map->io_align = btrfs_chunk_io_align(leaf, chunk);
7104 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
7105 map->type = type;
7106 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
7107 map->verified_stripes = 0;
7108 em->orig_block_len = calc_stripe_length(type, em->len,
7109 map->num_stripes);
7110 for (i = 0; i < num_stripes; i++) {
7111 map->stripes[i].physical =
7112 btrfs_stripe_offset_nr(leaf, chunk, i);
7113 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
7114 args.devid = devid;
7115 read_extent_buffer(leaf, uuid, (unsigned long)
7116 btrfs_stripe_dev_uuid_nr(chunk, i),
7117 BTRFS_UUID_SIZE);
7118 args.uuid = uuid;
7119 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, &args);
7120 if (!map->stripes[i].dev) {
7121 map->stripes[i].dev = handle_missing_device(fs_info,
7122 devid, uuid);
7123 if (IS_ERR(map->stripes[i].dev)) {
7124 free_extent_map(em);
7125 return PTR_ERR(map->stripes[i].dev);
7126 }
7127 }
7128
7129 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
7130 &(map->stripes[i].dev->dev_state));
7131 }
7132
7133 write_lock(&map_tree->lock);
7134 ret = add_extent_mapping(map_tree, em, 0);
7135 write_unlock(&map_tree->lock);
7136 if (ret < 0) {
7137 btrfs_err(fs_info,
7138 "failed to add chunk map, start=%llu len=%llu: %d",
7139 em->start, em->len, ret);
7140 }
7141 free_extent_map(em);
7142
7143 return ret;
7144 }
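
/*
 * Illustration (a simplified sketch, not a helper used here): with the
 * mapping cached above, resolving a logical address inside this chunk is a
 * map_lookup computation. For the trivial SINGLE profile it reduces to:
 *
 *	u64 physical = map->stripes[0].physical + (logical - em->start);
 *
 * Striped and mirrored profiles additionally factor in stripe_len,
 * sub_stripes and the stripe index (see the mapping code earlier in this
 * file).
 */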
7145
7146 static void fill_device_from_item(struct extent_buffer *leaf,
7147 struct btrfs_dev_item *dev_item,
7148 struct btrfs_device *device)
7149 {
7150 unsigned long ptr;
7151
7152 device->devid = btrfs_device_id(leaf, dev_item);
7153 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
7154 device->total_bytes = device->disk_total_bytes;
7155 device->commit_total_bytes = device->disk_total_bytes;
7156 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
7157 device->commit_bytes_used = device->bytes_used;
7158 device->type = btrfs_device_type(leaf, dev_item);
7159 device->io_align = btrfs_device_io_align(leaf, dev_item);
7160 device->io_width = btrfs_device_io_width(leaf, dev_item);
7161 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
7162 WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
7163 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
7164
7165 ptr = btrfs_device_uuid(dev_item);
7166 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
7167 }
7168
7169 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
7170 u8 *fsid)
7171 {
7172 struct btrfs_fs_devices *fs_devices;
7173 int ret;
7174
7175 lockdep_assert_held(&uuid_mutex);
7176 ASSERT(fsid);
7177
7178 /* This will match only for multi-device seed fs */
7179 list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
7180 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
7181 return fs_devices;
7182
7184 fs_devices = find_fsid(fsid, NULL);
7185 if (!fs_devices) {
7186 if (!btrfs_test_opt(fs_info, DEGRADED))
7187 return ERR_PTR(-ENOENT);
7188
7189 fs_devices = alloc_fs_devices(fsid, NULL);
7190 if (IS_ERR(fs_devices))
7191 return fs_devices;
7192
7193 fs_devices->seeding = true;
7194 fs_devices->opened = 1;
7195 return fs_devices;
7196 }
7197
7198 /*
7199 * Upon first call for a seed fs fsid, just create a private copy of the
7200 * respective fs_devices and anchor it at fs_info->fs_devices->seed_list.
7201 */
7202 fs_devices = clone_fs_devices(fs_devices);
7203 if (IS_ERR(fs_devices))
7204 return fs_devices;
7205
7206 ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
7207 if (ret) {
7208 free_fs_devices(fs_devices);
7209 return ERR_PTR(ret);
7210 }
7211
7212 if (!fs_devices->seeding) {
7213 close_fs_devices(fs_devices);
7214 free_fs_devices(fs_devices);
7215 return ERR_PTR(-EINVAL);
7216 }
7217
7218 list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
7219
7220 return fs_devices;
7221 }
7222
7223 static int read_one_dev(struct extent_buffer *leaf,
7224 struct btrfs_dev_item *dev_item)
7225 {
7226 BTRFS_DEV_LOOKUP_ARGS(args);
7227 struct btrfs_fs_info *fs_info = leaf->fs_info;
7228 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7229 struct btrfs_device *device;
7230 u64 devid;
7231 int ret;
7232 u8 fs_uuid[BTRFS_FSID_SIZE];
7233 u8 dev_uuid[BTRFS_UUID_SIZE];
7234
7235 devid = args.devid = btrfs_device_id(leaf, dev_item);
7236 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
7237 BTRFS_UUID_SIZE);
7238 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
7239 BTRFS_FSID_SIZE);
7240 args.uuid = dev_uuid;
7241 args.fsid = fs_uuid;
7242
7243 if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
7244 fs_devices = open_seed_devices(fs_info, fs_uuid);
7245 if (IS_ERR(fs_devices))
7246 return PTR_ERR(fs_devices);
7247 }
7248
7249 device = btrfs_find_device(fs_info->fs_devices, &args);
7250 if (!device) {
7251 if (!btrfs_test_opt(fs_info, DEGRADED)) {
7252 btrfs_report_missing_device(fs_info, devid,
7253 dev_uuid, true);
7254 return -ENOENT;
7255 }
7256
7257 device = add_missing_dev(fs_devices, devid, dev_uuid);
7258 if (IS_ERR(device)) {
7259 btrfs_err(fs_info,
7260 "failed to add missing dev %llu: %ld",
7261 devid, PTR_ERR(device));
7262 return PTR_ERR(device);
7263 }
7264 btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
7265 } else {
7266 if (!device->bdev) {
7267 if (!btrfs_test_opt(fs_info, DEGRADED)) {
7268 btrfs_report_missing_device(fs_info,
7269 devid, dev_uuid, true);
7270 return -ENOENT;
7271 }
7272 btrfs_report_missing_device(fs_info, devid,
7273 dev_uuid, false);
7274 }
7275
7276 if (!device->bdev &&
7277 !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
7278 /*
7279 * This happens when a device that was properly set up
7280 * in the device info lists suddenly goes bad.
7281 * device->bdev is NULL, so we have to set the
7282 * BTRFS_DEV_STATE_MISSING bit here.
7283 */
7284 device->fs_devices->missing_devices++;
7285 set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
7286 }
7287
7288 /* Move the device to its own fs_devices */
7289 if (device->fs_devices != fs_devices) {
7290 ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
7291 &device->dev_state));
7292
7293 list_move(&device->dev_list, &fs_devices->devices);
7294 device->fs_devices->num_devices--;
7295 fs_devices->num_devices++;
7296
7297 device->fs_devices->missing_devices--;
7298 fs_devices->missing_devices++;
7299
7300 device->fs_devices = fs_devices;
7301 }
7302 }
7303
7304 if (device->fs_devices != fs_info->fs_devices) {
7305 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
7306 if (device->generation !=
7307 btrfs_device_generation(leaf, dev_item))
7308 return -EINVAL;
7309 }
7310
7311 fill_device_from_item(leaf, dev_item, device);
7312 if (device->bdev) {
7313 u64 max_total_bytes = bdev_nr_bytes(device->bdev);
7314
7315 if (device->total_bytes > max_total_bytes) {
7316 btrfs_err(fs_info,
7317 "device total_bytes should be at most %llu but found %llu",
7318 max_total_bytes, device->total_bytes);
7319 return -EINVAL;
7320 }
7321 }
7322 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
7323 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
7324 !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
7325 device->fs_devices->total_rw_bytes += device->total_bytes;
7326 atomic64_add(device->total_bytes - device->bytes_used,
7327 &fs_info->free_chunk_space);
7328 }
7329 return 0;
7331 }
7332
7333 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
7334 {
7335 struct btrfs_super_block *super_copy = fs_info->super_copy;
7336 struct extent_buffer *sb;
7337 struct btrfs_disk_key *disk_key;
7338 struct btrfs_chunk *chunk;
7339 u8 *array_ptr;
7340 unsigned long sb_array_offset;
7341 int ret = 0;
7342 u32 num_stripes;
7343 u32 array_size;
7344 u32 len = 0;
7345 u32 cur_offset;
7346 u64 type;
7347 struct btrfs_key key;
7348
7349 ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
7350
7351 /*
7352 * We allocate a dummy extent buffer, just to use the extent buffer accessors.
7353 * There will be unused space after BTRFS_SUPER_INFO_SIZE, but
7354 * that's fine, we will not go beyond system chunk array anyway.
7355 */
7356 sb = alloc_dummy_extent_buffer(fs_info, BTRFS_SUPER_INFO_OFFSET);
7357 if (!sb)
7358 return -ENOMEM;
7359 set_extent_buffer_uptodate(sb);
7360
7361 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
7362 array_size = btrfs_super_sys_array_size(super_copy);
7363
7364 array_ptr = super_copy->sys_chunk_array;
7365 sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
7366 cur_offset = 0;
7367
7368 while (cur_offset < array_size) {
7369 disk_key = (struct btrfs_disk_key *)array_ptr;
7370 len = sizeof(*disk_key);
7371 if (cur_offset + len > array_size)
7372 goto out_short_read;
7373
7374 btrfs_disk_key_to_cpu(&key, disk_key);
7375
7376 array_ptr += len;
7377 sb_array_offset += len;
7378 cur_offset += len;
7379
7380 if (key.type != BTRFS_CHUNK_ITEM_KEY) {
7381 btrfs_err(fs_info,
7382 "unexpected item type %u in sys_array at offset %u",
7383 (u32)key.type, cur_offset);
7384 ret = -EIO;
7385 break;
7386 }
7387
7388 chunk = (struct btrfs_chunk *)sb_array_offset;
7389 /*
7390 * At least one btrfs_chunk with one stripe must be present;
7391 * the exact stripe count check comes afterwards.
7392 */
7393 len = btrfs_chunk_item_size(1);
7394 if (cur_offset + len > array_size)
7395 goto out_short_read;
7396
7397 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
7398 if (!num_stripes) {
7399 btrfs_err(fs_info,
7400 "invalid number of stripes %u in sys_array at offset %u",
7401 num_stripes, cur_offset);
7402 ret = -EIO;
7403 break;
7404 }
7405
7406 type = btrfs_chunk_type(sb, chunk);
7407 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
7408 btrfs_err(fs_info,
7409 "invalid chunk type %llu in sys_array at offset %u",
7410 type, cur_offset);
7411 ret = -EIO;
7412 break;
7413 }
7414
7415 len = btrfs_chunk_item_size(num_stripes);
7416 if (cur_offset + len > array_size)
7417 goto out_short_read;
7418
7419 ret = read_one_chunk(&key, sb, chunk);
7420 if (ret)
7421 break;
7422
7423 array_ptr += len;
7424 sb_array_offset += len;
7425 cur_offset += len;
7426 }
7427 clear_extent_buffer_uptodate(sb);
7428 free_extent_buffer_stale(sb);
7429 return ret;
7430
7431 out_short_read:
7432 btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
7433 len, cur_offset);
7434 clear_extent_buffer_uptodate(sb);
7435 free_extent_buffer_stale(sb);
7436 return -EIO;
7437 }
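
/*
 * For reference, the sys_chunk_array walked above is a packed sequence of
 * (key, chunk) pairs:
 *
 *	struct btrfs_disk_key	key0;	(type must be BTRFS_CHUNK_ITEM_KEY)
 *	struct btrfs_chunk	chunk0;	(btrfs_chunk_item_size(num_stripes0) bytes)
 *	struct btrfs_disk_key	key1;
 *	struct btrfs_chunk	chunk1;
 *	...
 *
 * which is why each iteration advances first by sizeof(*disk_key) and then
 * by btrfs_chunk_item_size(num_stripes).
 */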
7438
7439 /*
7440 * Check if all chunks in the fs are OK for read-write degraded mount
7441 *
7442 * If @failing_dev is specified, it is accounted as missing.
7443 *
7444 * Return true if all chunks meet the minimal RW mount requirements.
7445 * Return false if any chunk doesn't meet the minimal RW mount requirements.
7446 */
7447 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7448 struct btrfs_device *failing_dev)
7449 {
7450 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7451 struct extent_map *em;
7452 u64 next_start = 0;
7453 bool ret = true;
7454
7455 read_lock(&map_tree->lock);
7456 em = lookup_extent_mapping(map_tree, 0, (u64)-1);
7457 read_unlock(&map_tree->lock);
7458 /* No chunk at all? Return false anyway */
7459 if (!em) {
7460 ret = false;
7461 goto out;
7462 }
7463 while (em) {
7464 struct map_lookup *map;
7465 int missing = 0;
7466 int max_tolerated;
7467 int i;
7468
7469 map = em->map_lookup;
7470 max_tolerated =
7471 btrfs_get_num_tolerated_disk_barrier_failures(
7472 map->type);
7473 for (i = 0; i < map->num_stripes; i++) {
7474 struct btrfs_device *dev = map->stripes[i].dev;
7475
7476 if (!dev || !dev->bdev ||
7477 test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7478 dev->last_flush_error)
7479 missing++;
7480 else if (failing_dev && failing_dev == dev)
7481 missing++;
7482 }
7483 if (missing > max_tolerated) {
7484 if (!failing_dev)
7485 btrfs_warn(fs_info,
7486 "chunk %llu missing %d devices, max tolerance is %d for writable mount",
7487 em->start, missing, max_tolerated);
7488 free_extent_map(em);
7489 ret = false;
7490 goto out;
7491 }
7492 next_start = extent_map_end(em);
7493 free_extent_map(em);
7494
7495 read_lock(&map_tree->lock);
7496 em = lookup_extent_mapping(map_tree, next_start,
7497 (u64)(-1) - next_start);
7498 read_unlock(&map_tree->lock);
7499 }
7500 out:
7501 return ret;
7502 }
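
/*
 * Worked example: a raid1 chunk tolerates one disk barrier failure, so one
 * missing stripe device still passes the check above, while two missing
 * devices (or one missing device plus @failing_dev on the other) push
 * missing beyond max_tolerated and make the writable mount fail.
 */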
7503
7504 static void readahead_tree_node_children(struct extent_buffer *node)
7505 {
7506 int i;
7507 const int nr_items = btrfs_header_nritems(node);
7508
7509 for (i = 0; i < nr_items; i++)
7510 btrfs_readahead_node_child(node, i);
7511 }
7512
7513 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7514 {
7515 struct btrfs_root *root = fs_info->chunk_root;
7516 struct btrfs_path *path;
7517 struct extent_buffer *leaf;
7518 struct btrfs_key key;
7519 struct btrfs_key found_key;
7520 int ret;
7521 int slot;
7522 int iter_ret = 0;
7523 u64 total_dev = 0;
7524 u64 last_ra_node = 0;
7525
7526 path = btrfs_alloc_path();
7527 if (!path)
7528 return -ENOMEM;
7529
7530 /*
7531 * uuid_mutex is needed only when we are mounting a sprout FS.
7533 */
7534 mutex_lock(&uuid_mutex);
7535
7536 /*
7537 * It is possible for mount and umount to race in such a way that
7538 * we execute this code path, but open_fs_devices failed to clear
7539 * total_rw_bytes. We certainly want it cleared before reading the
7540 * device items, so clear it here.
7541 */
7542 fs_info->fs_devices->total_rw_bytes = 0;
7543
7544 /*
7545 * Lockdep complains about possible circular locking dependency between
7546 * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
7547 * used for freeze protection of a fs (struct super_block.s_writers),
7548 * which we take when starting a transaction, and extent buffers of the
7549 * chunk tree if we call read_one_dev() while holding a lock on an
7550 * extent buffer of the chunk tree. Since we are mounting the filesystem
7551 * and at this point there can't be any concurrent task modifying the
7552 * chunk tree, to keep it simple, just skip locking on the chunk tree.
7553 */
7554 ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
7555 path->skip_locking = 1;
7556
7557 /*
7558 * Read all device items, and then all the chunk items. All
7559 * device items are found before any chunk item (their object id
7560 * is smaller than the lowest possible object id for a chunk
7561 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7562 */
7563 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7564 key.offset = 0;
7565 key.type = 0;
7566 btrfs_for_each_slot(root, &key, &found_key, path, iter_ret) {
7567 struct extent_buffer *node = path->nodes[1];
7568
7569 leaf = path->nodes[0];
7570 slot = path->slots[0];
7571
7572 if (node) {
7573 if (last_ra_node != node->start) {
7574 readahead_tree_node_children(node);
7575 last_ra_node = node->start;
7576 }
7577 }
7578 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7579 struct btrfs_dev_item *dev_item;
7580 dev_item = btrfs_item_ptr(leaf, slot,
7581 struct btrfs_dev_item);
7582 ret = read_one_dev(leaf, dev_item);
7583 if (ret)
7584 goto error;
7585 total_dev++;
7586 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7587 struct btrfs_chunk *chunk;
7588
7589 /*
7590 * We are only called at mount time, so no need to take
7591 * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
7592 * we always lock fs_info->chunk_mutex first, before
7593 * acquiring any locks on the chunk tree. This is a
7594 * requirement for chunk allocation, see the comment on
7595 * top of btrfs_chunk_alloc() for details.
7596 */
7597 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7598 ret = read_one_chunk(&found_key, leaf, chunk);
7599 if (ret)
7600 goto error;
7601 }
7602 }
7603 /* Catch error found during iteration */
7604 if (iter_ret < 0) {
7605 ret = iter_ret;
7606 goto error;
7607 }
7608
7609 /*
7610 * After loading chunk tree, we've got all device information,
7611 * do another round of validation checks.
7612 */
7613 if (total_dev != fs_info->fs_devices->total_devices) {
7614 btrfs_warn(fs_info,
7615 "super block num_devices %llu mismatch with DEV_ITEM count %llu, will be repaired on next transaction commit",
7616 btrfs_super_num_devices(fs_info->super_copy),
7617 total_dev);
7618 fs_info->fs_devices->total_devices = total_dev;
7619 btrfs_set_super_num_devices(fs_info->super_copy, total_dev);
7620 }
7621 if (btrfs_super_total_bytes(fs_info->super_copy) <
7622 fs_info->fs_devices->total_rw_bytes) {
7623 btrfs_err(fs_info,
7624 "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7625 btrfs_super_total_bytes(fs_info->super_copy),
7626 fs_info->fs_devices->total_rw_bytes);
7627 ret = -EINVAL;
7628 goto error;
7629 }
7630 ret = 0;
7631 error:
7632 mutex_unlock(&uuid_mutex);
7633
7634 btrfs_free_path(path);
7635 return ret;
7636 }
7637
7638 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7639 {
7640 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7641 struct btrfs_device *device;
7642
7643 fs_devices->fs_info = fs_info;
7644
7645 mutex_lock(&fs_devices->device_list_mutex);
7646 list_for_each_entry(device, &fs_devices->devices, dev_list)
7647 device->fs_info = fs_info;
7648
7649 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7650 list_for_each_entry(device, &seed_devs->devices, dev_list)
7651 device->fs_info = fs_info;
7652
7653 seed_devs->fs_info = fs_info;
7654 }
7655 mutex_unlock(&fs_devices->device_list_mutex);
7656 }
7657
7658 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7659 const struct btrfs_dev_stats_item *ptr,
7660 int index)
7661 {
7662 u64 val;
7663
7664 read_extent_buffer(eb, &val,
7665 offsetof(struct btrfs_dev_stats_item, values) +
7666 ((unsigned long)ptr) + (index * sizeof(u64)),
7667 sizeof(val));
7668 return val;
7669 }
7670
7671 static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7672 struct btrfs_dev_stats_item *ptr,
7673 int index, u64 val)
7674 {
7675 write_extent_buffer(eb, &val,
7676 offsetof(struct btrfs_dev_stats_item, values) +
7677 ((unsigned long)ptr) + (index * sizeof(u64)),
7678 sizeof(val));
7679 }
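
/*
 * Both accessors above rely on struct btrfs_dev_stats_item being nothing
 * but an array of __le64 counters on disk, so the byte offset of value
 * @index within the leaf is:
 *
 *	(unsigned long)ptr + offsetof(struct btrfs_dev_stats_item, values)
 *			   + index * sizeof(u64)
 */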
7680
7681 static int btrfs_device_init_dev_stats(struct btrfs_device *device,
7682 struct btrfs_path *path)
7683 {
7684 struct btrfs_dev_stats_item *ptr;
7685 struct extent_buffer *eb;
7686 struct btrfs_key key;
7687 int item_size;
7688 int i, ret, slot;
7689
7690 if (!device->fs_info->dev_root)
7691 return 0;
7692
7693 key.objectid = BTRFS_DEV_STATS_OBJECTID;
7694 key.type = BTRFS_PERSISTENT_ITEM_KEY;
7695 key.offset = device->devid;
7696 ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
7697 if (ret) {
7698 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7699 btrfs_dev_stat_set(device, i, 0);
7700 device->dev_stats_valid = 1;
7701 btrfs_release_path(path);
7702 return ret < 0 ? ret : 0;
7703 }
7704 slot = path->slots[0];
7705 eb = path->nodes[0];
7706 item_size = btrfs_item_size(eb, slot);
7707
7708 ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);
7709
7710 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7711 if (item_size >= (1 + i) * sizeof(__le64))
7712 btrfs_dev_stat_set(device, i,
7713 btrfs_dev_stats_value(eb, ptr, i));
7714 else
7715 btrfs_dev_stat_set(device, i, 0);
7716 }
7717
7718 device->dev_stats_valid = 1;
7719 btrfs_dev_stat_print_on_load(device);
7720 btrfs_release_path(path);
7721
7722 return 0;
7723 }
7724
7725 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7726 {
7727 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7728 struct btrfs_device *device;
7729 struct btrfs_path *path = NULL;
7730 int ret = 0;
7731
7732 path = btrfs_alloc_path();
7733 if (!path)
7734 return -ENOMEM;
7735
7736 mutex_lock(&fs_devices->device_list_mutex);
7737 list_for_each_entry(device, &fs_devices->devices, dev_list) {
7738 ret = btrfs_device_init_dev_stats(device, path);
7739 if (ret)
7740 goto out;
7741 }
7742 list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7743 list_for_each_entry(device, &seed_devs->devices, dev_list) {
7744 ret = btrfs_device_init_dev_stats(device, path);
7745 if (ret)
7746 goto out;
7747 }
7748 }
7749 out:
7750 mutex_unlock(&fs_devices->device_list_mutex);
7751
7752 btrfs_free_path(path);
7753 return ret;
7754 }
7755
7756 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7757 struct btrfs_device *device)
7758 {
7759 struct btrfs_fs_info *fs_info = trans->fs_info;
7760 struct btrfs_root *dev_root = fs_info->dev_root;
7761 struct btrfs_path *path;
7762 struct btrfs_key key;
7763 struct extent_buffer *eb;
7764 struct btrfs_dev_stats_item *ptr;
7765 int ret;
7766 int i;
7767
7768 key.objectid = BTRFS_DEV_STATS_OBJECTID;
7769 key.type = BTRFS_PERSISTENT_ITEM_KEY;
7770 key.offset = device->devid;
7771
7772 path = btrfs_alloc_path();
7773 if (!path)
7774 return -ENOMEM;
7775 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7776 if (ret < 0) {
7777 btrfs_warn_in_rcu(fs_info,
7778 "error %d while searching for dev_stats item for device %s",
7779 ret, rcu_str_deref(device->name));
7780 goto out;
7781 }
7782
7783 if (ret == 0 &&
7784 btrfs_item_size(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7785 /* need to delete old one and insert a new one */
7786 ret = btrfs_del_item(trans, dev_root, path);
7787 if (ret != 0) {
7788 btrfs_warn_in_rcu(fs_info,
7789 "delete too small dev_stats item for device %s failed %d",
7790 rcu_str_deref(device->name), ret);
7791 goto out;
7792 }
7793 ret = 1;
7794 }
7795
7796 if (ret == 1) {
7797 /* need to insert a new item */
7798 btrfs_release_path(path);
7799 ret = btrfs_insert_empty_item(trans, dev_root, path,
7800 &key, sizeof(*ptr));
7801 if (ret < 0) {
7802 btrfs_warn_in_rcu(fs_info,
7803 "insert dev_stats item for device %s failed %d",
7804 rcu_str_deref(device->name), ret);
7805 goto out;
7806 }
7807 }
7808
7809 eb = path->nodes[0];
7810 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7811 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7812 btrfs_set_dev_stats_value(eb, ptr, i,
7813 btrfs_dev_stat_read(device, i));
7814 btrfs_mark_buffer_dirty(eb);
7815
7816 out:
7817 btrfs_free_path(path);
7818 return ret;
7819 }
7820
7821 /*
7822 * Called from commit_transaction. Writes all changed device stats to disk.
7823 */
7824 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7825 {
7826 struct btrfs_fs_info *fs_info = trans->fs_info;
7827 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7828 struct btrfs_device *device;
7829 int stats_cnt;
7830 int ret = 0;
7831
7832 mutex_lock(&fs_devices->device_list_mutex);
7833 list_for_each_entry(device, &fs_devices->devices, dev_list) {
7834 stats_cnt = atomic_read(&device->dev_stats_ccnt);
7835 if (!device->dev_stats_valid || stats_cnt == 0)
7836 continue;
7837
7839 /*
7840 * There is a LOAD-LOAD control dependency between the value of
7841 * dev_stats_ccnt and updating the on-disk values which requires
7842 * reading the in-memory counters. Such control dependencies
7843 * require explicit read memory barriers.
7844 *
7845 * This memory barrier pairs with smp_mb__before_atomic() in
7846 * btrfs_dev_stat_inc()/btrfs_dev_stat_set() and with the full
7847 * barrier implied by atomic_xchg() in
7848 * btrfs_dev_stats_read_and_reset().
7849 */
7850 smp_rmb();
7851
7852 ret = update_dev_stat_item(trans, device);
7853 if (!ret)
7854 atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7855 }
7856 mutex_unlock(&fs_devices->device_list_mutex);
7857
7858 return ret;
7859 }
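
/*
 * Sketch of the barrier pairing described above, assuming the writer side
 * looks like btrfs_dev_stat_inc() (defined elsewhere in btrfs):
 *
 *	writer:	atomic_inc(&dev->dev_stat_values[index]);
 *		smp_mb__before_atomic();
 *		atomic_inc(&dev->dev_stats_ccnt);
 *
 *	reader:	stats_cnt = atomic_read(&device->dev_stats_ccnt);
 *		smp_rmb();
 *		update_dev_stat_item(trans, device);
 *
 * Once the reader observes a non-zero ccnt, the smp_rmb() guarantees the
 * counter updates made before the matching increment are visible too.
 */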
7860
7861 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7862 {
7863 btrfs_dev_stat_inc(dev, index);
7864 btrfs_dev_stat_print_on_error(dev);
7865 }
7866
7867 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7868 {
7869 if (!dev->dev_stats_valid)
7870 return;
7871 btrfs_err_rl_in_rcu(dev->fs_info,
7872 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7873 rcu_str_deref(dev->name),
7874 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7875 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7876 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7877 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7878 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7879 }
7880
7881 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7882 {
7883 int i;
7884
7885 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7886 if (btrfs_dev_stat_read(dev, i) != 0)
7887 break;
7888 if (i == BTRFS_DEV_STAT_VALUES_MAX)
7889 return; /* all values == 0, suppress message */
7890
7891 btrfs_info_in_rcu(dev->fs_info,
7892 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7893 rcu_str_deref(dev->name),
7894 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7895 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7896 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7897 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7898 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7899 }
7900
7901 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7902 struct btrfs_ioctl_get_dev_stats *stats)
7903 {
7904 BTRFS_DEV_LOOKUP_ARGS(args);
7905 struct btrfs_device *dev;
7906 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7907 int i;
7908
7909 mutex_lock(&fs_devices->device_list_mutex);
7910 args.devid = stats->devid;
7911 dev = btrfs_find_device(fs_info->fs_devices, &args);
7912 mutex_unlock(&fs_devices->device_list_mutex);
7913
7914 if (!dev) {
7915 btrfs_warn(fs_info, "get dev_stats failed, device not found");
7916 return -ENODEV;
7917 } else if (!dev->dev_stats_valid) {
7918 btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7919 return -ENODEV;
7920 } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7921 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7922 if (stats->nr_items > i)
7923 stats->values[i] =
7924 btrfs_dev_stat_read_and_reset(dev, i);
7925 else
7926 btrfs_dev_stat_set(dev, i, 0);
7927 }
7928 btrfs_info(fs_info, "device stats zeroed by %s (%d)",
7929 current->comm, task_pid_nr(current));
7930 } else {
7931 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7932 if (stats->nr_items > i)
7933 stats->values[i] = btrfs_dev_stat_read(dev, i);
7934 }
7935 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7936 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7937 return 0;
7938 }
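
/*
 * Hedged userspace sketch of the caller side (illustrative, not part of
 * this file), assuming <linux/btrfs.h> and an fd open on any file in the
 * filesystem:
 *
 *	struct btrfs_ioctl_get_dev_stats s = {
 *		.devid = 1,
 *		.nr_items = BTRFS_DEV_STAT_VALUES_MAX,
 *	};
 *	if (ioctl(fd, BTRFS_IOC_GET_DEV_STATS, &s) == 0)
 *		printf("write errs: %llu\n",
 *		       (unsigned long long)s.values[BTRFS_DEV_STAT_WRITE_ERRS]);
 *
 * The zeroing branch above runs when BTRFS_DEV_STATS_RESET is set in
 * stats->flags, as done by the reset variant of the ioctl.
 */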
7939
7940 /*
7941 * Update the size and bytes used for each device where it changed. This is
7942 * delayed since we would otherwise get errors while writing out the
7943 * superblocks.
7944 *
7945 * Must be invoked during transaction commit.
7946 */
7947 void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
7948 {
7949 struct btrfs_device *curr, *next;
7950
7951 ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
7952
7953 if (list_empty(&trans->dev_update_list))
7954 return;
7955
7956 /*
7957 * We don't need the device_list_mutex here. This list is owned by the
7958 * transaction and the transaction must complete before the device is
7959 * released.
7960 */
7961 mutex_lock(&trans->fs_info->chunk_mutex);
7962 list_for_each_entry_safe(curr, next, &trans->dev_update_list,
7963 post_commit_list) {
7964 list_del_init(&curr->post_commit_list);
7965 curr->commit_total_bytes = curr->disk_total_bytes;
7966 curr->commit_bytes_used = curr->bytes_used;
7967 }
7968 mutex_unlock(&trans->fs_info->chunk_mutex);
7969 }
7970
7971 /*
7972 * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7973 */
7974 int btrfs_bg_type_to_factor(u64 flags)
7975 {
7976 const int index = btrfs_bg_flags_to_raid_index(flags);
7977
7978 return btrfs_raid_array[index].ncopies;
7979 }
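
/*
 * E.g. btrfs_bg_type_to_factor(BTRFS_BLOCK_GROUP_RAID1) == 2, since every
 * logical byte occupies two raw bytes. An illustrative caller sketch:
 *
 *	u64 usable = div_u64(raw_bytes, btrfs_bg_type_to_factor(flags));
 */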
7980
7983 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7984 u64 chunk_offset, u64 devid,
7985 u64 physical_offset, u64 physical_len)
7986 {
7987 struct btrfs_dev_lookup_args args = { .devid = devid };
7988 struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7989 struct extent_map *em;
7990 struct map_lookup *map;
7991 struct btrfs_device *dev;
7992 u64 stripe_len;
7993 bool found = false;
7994 int ret = 0;
7995 int i;
7996
7997 read_lock(&em_tree->lock);
7998 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
7999 read_unlock(&em_tree->lock);
8000
8001 if (!em) {
8002 btrfs_err(fs_info,
8003 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
8004 physical_offset, devid);
8005 ret = -EUCLEAN;
8006 goto out;
8007 }
8008
8009 map = em->map_lookup;
8010 stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
8011 if (physical_len != stripe_len) {
8012 btrfs_err(fs_info,
8013 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
8014 physical_offset, devid, em->start, physical_len,
8015 stripe_len);
8016 ret = -EUCLEAN;
8017 goto out;
8018 }
8019
8020 for (i = 0; i < map->num_stripes; i++) {
8021 if (map->stripes[i].dev->devid == devid &&
8022 map->stripes[i].physical == physical_offset) {
8023 found = true;
8024 if (map->verified_stripes >= map->num_stripes) {
8025 btrfs_err(fs_info,
8026 "too many dev extents for chunk %llu found",
8027 em->start);
8028 ret = -EUCLEAN;
8029 goto out;
8030 }
8031 map->verified_stripes++;
8032 break;
8033 }
8034 }
8035 if (!found) {
8036 btrfs_err(fs_info,
8037 "dev extent physical offset %llu devid %llu has no corresponding chunk",
8038 physical_offset, devid);
8039 ret = -EUCLEAN;
8040 }
8041
8042 /* Make sure no dev extent is beyond device boundary */
8043 dev = btrfs_find_device(fs_info->fs_devices, &args);
8044 if (!dev) {
8045 btrfs_err(fs_info, "failed to find devid %llu", devid);
8046 ret = -EUCLEAN;
8047 goto out;
8048 }
8049
8050 if (physical_offset + physical_len > dev->disk_total_bytes) {
8051 btrfs_err(fs_info,
8052 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
8053 devid, physical_offset, physical_len,
8054 dev->disk_total_bytes);
8055 ret = -EUCLEAN;
8056 goto out;
8057 }
8058
8059 if (dev->zone_info) {
8060 u64 zone_size = dev->zone_info->zone_size;
8061
8062 if (!IS_ALIGNED(physical_offset, zone_size) ||
8063 !IS_ALIGNED(physical_len, zone_size)) {
8064 btrfs_err(fs_info,
8065 "zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
8066 devid, physical_offset, physical_len);
8067 ret = -EUCLEAN;
8068 goto out;
8069 }
8070 }
8071
8072 out:
8073 free_extent_map(em);
8074 return ret;
8075 }
8076
8077 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
8078 {
8079 struct extent_map_tree *em_tree = &fs_info->mapping_tree;
8080 struct extent_map *em;
8081 struct rb_node *node;
8082 int ret = 0;
8083
8084 read_lock(&em_tree->lock);
8085 for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
8086 em = rb_entry(node, struct extent_map, rb_node);
8087 if (em->map_lookup->num_stripes !=
8088 em->map_lookup->verified_stripes) {
8089 btrfs_err(fs_info,
8090 "chunk %llu has missing dev extent, have %d expect %d",
8091 em->start, em->map_lookup->verified_stripes,
8092 em->map_lookup->num_stripes);
8093 ret = -EUCLEAN;
8094 goto out;
8095 }
8096 }
8097 out:
8098 read_unlock(&em_tree->lock);
8099 return ret;
8100 }
8101
8102 /*
8103 * Ensure that all dev extents are mapped to correct chunk, otherwise
8104 * later chunk allocation/free would cause unexpected behavior.
8105 *
8106 * NOTE: This will iterate through the whole device tree, which should be
8107 * roughly the same size as the chunk tree, so it slightly increases mount time.
8108 */
8109 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
8110 {
8111 struct btrfs_path *path;
8112 struct btrfs_root *root = fs_info->dev_root;
8113 struct btrfs_key key;
8114 u64 prev_devid = 0;
8115 u64 prev_dev_ext_end = 0;
8116 int ret = 0;
8117
8118 /*
8119 * We don't have a dev_root because we mounted with ignorebadroots and
8120 * failed to load the root, so we want to skip the verification in this
8121 * case for sure.
8122 *
8123 * However if the dev root is fine, but the tree itself is corrupted
8124 * we'd still fail to mount. This verification is only to make sure
8125 * writes can happen safely, so instead just bypass this check
8126 * completely in the case of IGNOREBADROOTS.
8127 */
8128 if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
8129 return 0;
8130
8131 key.objectid = 1;
8132 key.type = BTRFS_DEV_EXTENT_KEY;
8133 key.offset = 0;
8134
8135 path = btrfs_alloc_path();
8136 if (!path)
8137 return -ENOMEM;
8138
8139 path->reada = READA_FORWARD;
8140 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8141 if (ret < 0)
8142 goto out;
8143
8144 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
8145 ret = btrfs_next_leaf(root, path);
8146 if (ret < 0)
8147 goto out;
8148 /* No dev extents at all? Not good */
8149 if (ret > 0) {
8150 ret = -EUCLEAN;
8151 goto out;
8152 }
8153 }
8154 while (1) {
8155 struct extent_buffer *leaf = path->nodes[0];
8156 struct btrfs_dev_extent *dext;
8157 int slot = path->slots[0];
8158 u64 chunk_offset;
8159 u64 physical_offset;
8160 u64 physical_len;
8161 u64 devid;
8162
8163 btrfs_item_key_to_cpu(leaf, &key, slot);
8164 if (key.type != BTRFS_DEV_EXTENT_KEY)
8165 break;
8166 devid = key.objectid;
8167 physical_offset = key.offset;
8168
8169 dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
8170 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
8171 physical_len = btrfs_dev_extent_length(leaf, dext);
8172
8173 /* Check if this dev extent overlaps with the previous one */
8174 if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
8175 btrfs_err(fs_info,
8176 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
8177 devid, physical_offset, prev_dev_ext_end);
8178 ret = -EUCLEAN;
8179 goto out;
8180 }
8181
8182 ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
8183 physical_offset, physical_len);
8184 if (ret < 0)
8185 goto out;
8186 prev_devid = devid;
8187 prev_dev_ext_end = physical_offset + physical_len;
8188
8189 ret = btrfs_next_item(root, path);
8190 if (ret < 0)
8191 goto out;
8192 if (ret > 0) {
8193 ret = 0;
8194 break;
8195 }
8196 }
8197
8198 /* Ensure all chunks have corresponding dev extents */
8199 ret = verify_chunk_dev_extent_mapping(fs_info);
8200 out:
8201 btrfs_free_path(path);
8202 return ret;
8203 }
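
/*
 * For reference, dev extent items are keyed as
 * (devid, BTRFS_DEV_EXTENT_KEY, physical_offset), so the walk above visits
 * each device's extents in ascending physical order and can detect overlaps
 * by remembering only the previous item's devid and end offset.
 */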
8204
8205 /*
8206 * Check whether the given block group or device is pinned by any inode being
8207 * used as a swapfile.
8208 */
8209 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
8210 {
8211 struct btrfs_swapfile_pin *sp;
8212 struct rb_node *node;
8213
8214 spin_lock(&fs_info->swapfile_pins_lock);
8215 node = fs_info->swapfile_pins.rb_node;
8216 while (node) {
8217 sp = rb_entry(node, struct btrfs_swapfile_pin, node);
8218 if (ptr < sp->ptr)
8219 node = node->rb_left;
8220 else if (ptr > sp->ptr)
8221 node = node->rb_right;
8222 else
8223 break;
8224 }
8225 spin_unlock(&fs_info->swapfile_pins_lock);
8226 return node != NULL;
8227 }
8228
8229 static int relocating_repair_kthread(void *data)
8230 {
8231 struct btrfs_block_group *cache = data;
8232 struct btrfs_fs_info *fs_info = cache->fs_info;
8233 u64 target;
8234 int ret = 0;
8235
8236 target = cache->start;
8237 btrfs_put_block_group(cache);
8238
8239 sb_start_write(fs_info->sb);
8240 if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
8241 btrfs_info(fs_info,
8242 "zoned: skip relocating block group %llu to repair: EBUSY",
8243 target);
8244 sb_end_write(fs_info->sb);
8245 return -EBUSY;
8246 }
8247
8248 mutex_lock(&fs_info->reclaim_bgs_lock);
8249
8250 /* Ensure block group still exists */
8251 cache = btrfs_lookup_block_group(fs_info, target);
8252 if (!cache)
8253 goto out;
8254
8255 if (!cache->relocating_repair)
8256 goto out;
8257
8258 ret = btrfs_may_alloc_data_chunk(fs_info, target);
8259 if (ret < 0)
8260 goto out;
8261
8262 btrfs_info(fs_info,
8263 "zoned: relocating block group %llu to repair IO failure",
8264 target);
8265 ret = btrfs_relocate_chunk(fs_info, target);
8266
8267 out:
8268 if (cache)
8269 btrfs_put_block_group(cache);
8270 mutex_unlock(&fs_info->reclaim_bgs_lock);
8271 btrfs_exclop_finish(fs_info);
8272 sb_end_write(fs_info->sb);
8273
8274 return ret;
8275 }
8276
8277 bool btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
8278 {
8279 struct btrfs_block_group *cache;
8280
8281 if (!btrfs_is_zoned(fs_info))
8282 return false;
8283
8284 /* Do not attempt to repair in degraded state */
8285 if (btrfs_test_opt(fs_info, DEGRADED))
8286 return true;
8287
8288 cache = btrfs_lookup_block_group(fs_info, logical);
8289 if (!cache)
8290 return true;
8291
8292 spin_lock(&cache->lock);
8293 if (cache->relocating_repair) {
8294 spin_unlock(&cache->lock);
8295 btrfs_put_block_group(cache);
8296 return true;
8297 }
8298 cache->relocating_repair = 1;
8299 spin_unlock(&cache->lock);
8300
8301 kthread_run(relocating_repair_kthread, cache,
8302 "btrfs-relocating-repair");
8303
8304 return true;
8305 }
8306