/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/kthread.h>
#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"
#include "check-integrity.h"

static int init_first_rw_device(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

static void lock_chunks(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
	mutex_unlock(&root->fs_info->chunk_mutex);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		kfree(device->name);
		kfree(device);
	}
	kfree(fs_devices);
}

void btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, list);
		list_del(&fs_devices->list);
		free_fs_devices(fs_devices);
	}
}

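/*
 * Look up a device on the given list by devid, and also by uuid when
 * one is supplied.  Returns NULL when no matching device is found.
 */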
static noinline struct btrfs_device *__find_device(struct list_head *head,
						   u64 devid, u8 *uuid)
{
	struct btrfs_device *dev;

	list_for_each_entry(dev, head, dev_list) {
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}

static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, list) {
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}

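/*
 * Splice a chain of bios back onto the front of a pending list after a
 * partial run; if the list was empty, the chain's tail becomes the new
 * list tail.
 */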
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			 struct bio *head, struct bio *tail)
{
	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_fs_info *fs_info;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long limit;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device.  We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = blk_get_backing_dev_info(device->bdev);
	fs_info = device->dev_root->fs_info;
	limit = btrfs_async_submit_limit(fs_info);
	limit = limit * 2 / 3;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held).  But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop.  Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;
		atomic_dec(&fs_info->nr_async_bios);

		if (atomic_read(&fs_info->nr_async_bios) < limit &&
		    waitqueue_active(&fs_info->async_submit_wait))
			wake_up(&fs_info->async_submit_wait);

		BUG_ON(atomic_read(&cur->bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur->bi_rw, cur);
		num_run++;
		batch_run++;
		if (need_resched())
			cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested.  Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop.  So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				if (need_resched())
					cond_resched();
				continue;
			}
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_requeue_work(&device->work);
			goto done;
		}
		/* unplug every 64 requests just for good measure */
		if (batch_run % 64 == 0) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}

static void pending_bios_fn(struct btrfs_work *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, work);
	run_scheduled_bios(device);
}

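/*
 * Add a scanned device to the in-memory list of known devices.  The
 * btrfs_fs_devices entry for the fsid is created on first sight, the
 * latest generation seen is remembered, and a stale path is replaced in
 * place when a known device is rescanned under a new name.
 */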
static noinline int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);
	char *name;

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		mutex_init(&fs_devices->device_list_mutex);
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		if (fs_devices->opened)
			return -EBUSY;

		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&device->dev_alloc_list);

		/* init readahead state */
		spin_lock_init(&device->reada_lock);
		device->reada_curr_zone = NULL;
		atomic_set(&device->reada_in_flight, 0);
		device->reada_next = 0;
		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);

		mutex_lock(&fs_devices->device_list_mutex);
		list_add_rcu(&device->dev_list, &fs_devices->devices);
		mutex_unlock(&fs_devices->device_list_mutex);

		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	} else if (!device->name || strcmp(device->name, path)) {
		name = kstrdup(path, GFP_NOFS);
		if (!name)
			return -ENOMEM;
		kfree(device->name);
		device->name = name;
		if (device->missing) {
			fs_devices->missing_devices--;
			device->missing = 0;
		}
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}

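/*
 * Duplicate an fs_devices structure together with its device list.
 * Used by the seeding code to keep the original fsid registered while
 * the mounted filesystem switches to a new one.
 */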
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!fs_devices)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&fs_devices->devices);
	INIT_LIST_HEAD(&fs_devices->alloc_list);
	INIT_LIST_HEAD(&fs_devices->list);
	mutex_init(&fs_devices->device_list_mutex);
	fs_devices->latest_devid = orig->latest_devid;
	fs_devices->latest_trans = orig->latest_trans;
	memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			goto error;

		device->name = kstrdup(orig_dev->name, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			goto error;
		}

		device->devid = orig_dev->devid;
		device->work.func = pending_bios_fn;
		memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
		spin_lock_init(&device->io_lock);
		INIT_LIST_HEAD(&device->dev_list);
		INIT_LIST_HEAD(&device->dev_alloc_list);

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	return fs_devices;
error:
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}

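/*
 * After all supers have been read, close and free every device that is
 * not referenced by the filesystem metadata, following the seed chain
 * as well, and record the device with the highest generation as the
 * latest one.
 */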
void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *next;

	struct block_device *latest_bdev = NULL;
	u64 latest_devid = 0;
	u64 latest_transid = 0;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (device->in_fs_metadata) {
			if (!latest_transid ||
			    device->generation > latest_transid) {
				latest_devid = device->devid;
				latest_transid = device->generation;
				latest_bdev = device->bdev;
			}
			continue;
		}

		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			device->writeable = 0;
			fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		kfree(device->name);
		kfree(device);
	}

	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;

	mutex_unlock(&uuid_mutex);
}

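/*
 * Device teardown runs in two stages: free_device() is the RCU callback
 * and only queues __free_device() on a workqueue, since blkdev_put()
 * may sleep and so must not be called from RCU callback context.
 */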
static void __free_device(struct work_struct *work)
{
	struct btrfs_device *device;

	device = container_of(work, struct btrfs_device, rcu_work);

	if (device->bdev)
		blkdev_put(device->bdev, device->mode);

	kfree(device->name);
	kfree(device);
}

static void free_device(struct rcu_head *head)
{
	struct btrfs_device *device;

	device = container_of(head, struct btrfs_device, rcu);

	INIT_WORK(&device->rcu_work, __free_device);
	schedule_work(&device->rcu_work);
}

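/*
 * Drop one open reference on fs_devices.  On the final close, each
 * device on the list is replaced by a bare copy so that RCU readers
 * still see a valid entry, while the old device (and its open block
 * device) is freed after a grace period via free_device().
 */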
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;

	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		struct btrfs_device *new_device;

		if (device->bdev)
			fs_devices->open_devices--;

		if (device->writeable) {
			list_del_init(&device->dev_alloc_list);
			fs_devices->rw_devices--;
		}

		if (device->can_discard)
			fs_devices->num_can_discard--;

		new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
		BUG_ON(!new_device); /* -ENOMEM */
		memcpy(new_device, device, sizeof(*new_device));
		new_device->name = kstrdup(device->name, GFP_NOFS);
		BUG_ON(device->name && !new_device->name); /* -ENOMEM */
		new_device->bdev = NULL;
		new_device->writeable = 0;
		new_device->in_fs_metadata = 0;
		new_device->can_discard = 0;
		spin_lock_init(&new_device->io_lock);
		list_replace_rcu(&device->dev_list, &new_device->dev_list);

		call_rcu(&device->rcu, free_device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}

int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = __btrfs_close_devices(fs_devices);
	if (!fs_devices->opened) {
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		__btrfs_close_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	/*
	 * Wait for rcu kworkers under __btrfs_close_devices
	 * to finish all blkdev_puts so device is really
	 * free when umount is done.
	 */
	rcu_barrier();
	return ret;
}

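/*
 * Open every registered device of the filesystem, verify its super
 * block against the devid/uuid recorded at scan time, and collect the
 * latest device, the seeding state and the discard/rotational flags.
 * Devices that fail to open are skipped; the open as a whole fails only
 * if no device could be opened at all.
 */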
static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct btrfs_device *device;
	struct block_device *latest_bdev = NULL;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 latest_devid = 0;
	u64 latest_transid = 0;
	u64 devid;
	int seeding = 1;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, head, dev_list) {
		if (device->bdev)
			continue;
		if (!device->name)
			continue;

		bdev = blkdev_get_by_path(device->name, flags, holder);
		if (IS_ERR(bdev)) {
			printk(KERN_INFO "open %s failed\n", device->name);
			goto error;
		}
		filemap_write_and_wait(bdev->bd_inode->i_mapping);
		invalidate_bdev(bdev);
		set_blocksize(bdev, 4096);

		bh = btrfs_read_dev_super(bdev);
		if (!bh)
			goto error_close;

		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		if (devid != device->devid)
			goto error_brelse;

		if (memcmp(device->uuid, disk_super->dev_item.uuid,
			   BTRFS_UUID_SIZE))
			goto error_brelse;

		device->generation = btrfs_super_generation(disk_super);
		if (!latest_transid || device->generation > latest_transid) {
			latest_devid = devid;
			latest_transid = device->generation;
			latest_bdev = bdev;
		}

		if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
			device->writeable = 0;
		} else {
			device->writeable = !bdev_read_only(bdev);
			seeding = 0;
		}

		q = bdev_get_queue(bdev);
		if (blk_queue_discard(q)) {
			device->can_discard = 1;
			fs_devices->num_can_discard++;
		}

		device->bdev = bdev;
		device->in_fs_metadata = 0;
		device->mode = flags;

		if (!blk_queue_nonrot(bdev_get_queue(bdev)))
			fs_devices->rotating = 1;

		fs_devices->open_devices++;
		if (device->writeable) {
			fs_devices->rw_devices++;
			list_add(&device->dev_alloc_list,
				 &fs_devices->alloc_list);
		}
		brelse(bh);
		continue;

error_brelse:
		brelse(bh);
error_close:
		blkdev_put(bdev, flags);
error:
		continue;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->seeding = seeding;
	fs_devices->opened = 1;
	fs_devices->latest_bdev = latest_bdev;
	fs_devices->latest_devid = latest_devid;
	fs_devices->latest_trans = latest_transid;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	mutex_lock(&uuid_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		ret = __btrfs_open_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&uuid_mutex);
	return ret;
}

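/*
 * Read the super block of a single device, print the identity of the
 * filesystem it belongs to and register the device in the list of known
 * filesystems via device_list_add().
 */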
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	flags |= FMODE_EXCL;
	bdev = blkdev_get_by_path(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	mutex_lock(&uuid_mutex);
	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = btrfs_read_dev_super(bdev);
	if (!bh) {
		ret = -EINVAL;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk(KERN_INFO "device label %s ", disk_super->label);
	else
		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
	printk(KERN_CONT "devid %llu transid %llu %s\n",
	       (unsigned long long)devid, (unsigned long long)transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

	brelse(bh);
error_close:
	mutex_unlock(&uuid_mutex);
	blkdev_put(bdev, flags);
error:
	return ret;
}

/* helper to account the used device space in the range */
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
				   u64 end, u64 *length)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 extent_end;
	int ret;
	int slot;
	struct extent_buffer *l;

	*length = 0;

	if (start >= device->total_bytes)
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (key.offset <= start && extent_end > end) {
			*length = end - start + 1;
			break;
		} else if (key.offset <= start && extent_end > start)
			*length += extent_end - start;
		else if (key.offset > start && extent_end <= end)
			*length += extent_end - key.offset;
		else if (key.offset > start && key.offset <= end) {
			*length += end - key.offset + 1;
			break;
		} else if (key.offset > end)
			break;

next:
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * find_free_dev_extent - find free space in the specified device
 * @device:	the device which we search the free space in
 * @num_bytes:	the size of the free space that we need
 * @start:	store the start of the free space.
 * @len:	the size of the free space that we find, or the size of the
 *		max free space if we don't find suitable free space
 *
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 *
 * @start is used to store the start of the free space if we find it.  But
 * if we don't find suitable free space, it will be used to store the start
 * position of the max free space.
 *
 * @len is used to store the size of the free space that we find.
 * But if we don't find suitable free space, it is used to store the size of
 * the max free space.
 */
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_start;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

	max_hole_start = search_start;
	max_hole_size = 0;
	hole_size = 0;

	if (search_start >= search_end) {
		ret = -ENOSPC;
		goto error;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}
	path->reada = 2;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than what we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size.  Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start)
		hole_size = search_end - search_start;

	if (hole_size > max_hole_size) {
		max_hole_start = search_start;
		max_hole_size = hole_size;
	}

	/* See above. */
	if (hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
error:
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}

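/*
 * Remove the dev extent item covering @start from the device tree and
 * return the space to the device: bytes_used shrinks and the global
 * free_chunk_space counter grows by the extent length.
 */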
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_device *device,
				 u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_error(root->fs_info, ret, "Slot search failed");
		goto out;
	}

	if (device->bytes_used > 0) {
		u64 len = btrfs_dev_extent_length(leaf, extent);
		device->bytes_used -= len;
		spin_lock(&root->fs_info->free_chunk_lock);
		root->fs_info->free_chunk_space += len;
		spin_unlock(&root->fs_info->free_chunk_lock);
	}
	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Failed to remove dev extent item");
	}
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	WARN_ON(!device->in_fs_metadata);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

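/*
 * Find the offset just past the last chunk item with the given
 * objectid, i.e. the logical address at which the next chunk can be
 * placed.  *offset is 0 when no chunk exists yet.
 */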
static noinline int find_next_chunk(struct btrfs_root *root,
				    u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

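/*
 * devids are handed out sequentially: find the highest device item in
 * the chunk tree and return the next id after it (or 1 for the first
 * device).
 */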
static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = (unsigned long)btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int btrfs_rm_dev_item(struct btrfs_root *root,
			     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;
	lock_chunks(root);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;
out:
	btrfs_free_path(path);
	unlock_chunks(root);
	btrfs_commit_transaction(trans, root);
	return ret;
}

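/*
 * Remove a device from a mounted filesystem.  The device is shrunk to
 * zero, which relocates all of its chunks to the remaining devices,
 * its items are deleted from the chunk tree, and the superblock magic
 * on disk is wiped so the device is no longer detected as btrfs.  The
 * special path "missing" picks the first device that is present in the
 * metadata but was absent when the filesystem was opened.
 */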
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_device *device;
	struct btrfs_device *next_device;
	struct block_device *bdev;
	struct buffer_head *bh = NULL;
	struct btrfs_super_block *disk_super;
	struct btrfs_fs_devices *cur_devices;
	u64 all_avail;
	u64 devid;
	u64 num_devices;
	u8 *dev_uuid;
	int ret = 0;
	bool clear_super = false;

	mutex_lock(&uuid_mutex);

	all_avail = root->fs_info->avail_data_alloc_bits |
		    root->fs_info->avail_system_alloc_bits |
		    root->fs_info->avail_metadata_alloc_bits;

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
	    root->fs_info->fs_devices->num_devices <= 4) {
		printk(KERN_ERR "btrfs: unable to go below four devices "
		       "on raid10\n");
		ret = -EINVAL;
		goto out;
	}

	if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
	    root->fs_info->fs_devices->num_devices <= 2) {
		printk(KERN_ERR "btrfs: unable to go below two "
		       "devices on raid1\n");
		ret = -EINVAL;
		goto out;
	}

	if (strcmp(device_path, "missing") == 0) {
		struct list_head *devices;
		struct btrfs_device *tmp;

		device = NULL;
		devices = &root->fs_info->fs_devices->devices;
		/*
		 * It is safe to read the devices since the volume_mutex
		 * is held.
		 */
		list_for_each_entry(tmp, devices, dev_list) {
			if (tmp->in_fs_metadata && !tmp->bdev) {
				device = tmp;
				break;
			}
		}
		bdev = NULL;
		bh = NULL;
		disk_super = NULL;
		if (!device) {
			printk(KERN_ERR "btrfs: no missing devices found to "
			       "remove\n");
			goto out;
		}
	} else {
		bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
					  root->fs_info->bdev_holder);
		if (IS_ERR(bdev)) {
			ret = PTR_ERR(bdev);
			goto out;
		}

		set_blocksize(bdev, 4096);
		invalidate_bdev(bdev);
		bh = btrfs_read_dev_super(bdev);
		if (!bh) {
			ret = -EINVAL;
			goto error_close;
		}
		disk_super = (struct btrfs_super_block *)bh->b_data;
		devid = btrfs_stack_device_id(&disk_super->dev_item);
		dev_uuid = disk_super->dev_item.uuid;
		device = btrfs_find_device(root, devid, dev_uuid,
					   disk_super->fsid);
		if (!device) {
			ret = -ENOENT;
			goto error_brelse;
		}
	}

	if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
		printk(KERN_ERR "btrfs: unable to remove the only writeable "
		       "device\n");
		ret = -EINVAL;
		goto error_brelse;
	}

	if (device->writeable) {
		lock_chunks(root);
		list_del_init(&device->dev_alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices--;
		clear_super = true;
	}

	ret = btrfs_shrink_device(device, 0);
	if (ret)
		goto error_undo;

	ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
	if (ret)
		goto error_undo;

	spin_lock(&root->fs_info->free_chunk_lock);
	root->fs_info->free_chunk_space = device->total_bytes -
		device->bytes_used;
	spin_unlock(&root->fs_info->free_chunk_lock);

	device->in_fs_metadata = 0;
	btrfs_scrub_cancel_dev(root, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers.
	 */

	cur_devices = device->fs_devices;
	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	device->fs_devices->num_devices--;

	if (device->missing)
		root->fs_info->fs_devices->missing_devices--;

	next_device = list_entry(root->fs_info->fs_devices->devices.next,
				 struct btrfs_device, dev_list);
	if (device->bdev == root->fs_info->sb->s_bdev)
		root->fs_info->sb->s_bdev = next_device->bdev;
	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
		root->fs_info->fs_devices->latest_bdev = next_device->bdev;

	if (device->bdev)
		device->fs_devices->open_devices--;

	call_rcu(&device->rcu, free_device);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);

	if (cur_devices->open_devices == 0) {
		struct btrfs_fs_devices *fs_devices;
		fs_devices = root->fs_info->fs_devices;
		while (fs_devices) {
			if (fs_devices->seed == cur_devices) {
				fs_devices->seed = cur_devices->seed;
				break;
			}
			fs_devices = fs_devices->seed;
		}
		cur_devices->seed = NULL;
		lock_chunks(root);
		__btrfs_close_devices(cur_devices);
		unlock_chunks(root);
		free_fs_devices(cur_devices);
	}

	/*
	 * at this point, the device is zero sized.  We want to
	 * remove it from the devices list and zero out the old super
	 */
	if (clear_super) {
		/* make sure this device isn't detected as part of
		 * the FS anymore
		 */
		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
	}

	ret = 0;

error_brelse:
	brelse(bh);
error_close:
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
	mutex_unlock(&uuid_mutex);
	return ret;
error_undo:
	if (device->writeable) {
		lock_chunks(root);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		unlock_chunks(root);
		root->fs_info->fs_devices->rw_devices++;
	}
	goto error_brelse;
}

/*
 * does all the dirty work required for changing the file system's UUID.
 */
static int btrfs_prepare_sprout(struct btrfs_root *root)
{
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	BUG_ON(!mutex_is_locked(&uuid_mutex));
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
	if (!seed_devices)
		return -ENOMEM;

	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->list, &fs_uuids);

	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			      synchronize_rcu);
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	list_for_each_entry(device, &seed_devices->devices, dev_list) {
		device->fs_devices = seed_devices;
	}

	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}

/*
 * store the expected generation for seed devices in device items.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_UUID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	root = root->fs_info->chunk_root;
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid,
				   (unsigned long)btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid,
				   (unsigned long)btrfs_device_fsid(dev_item),
				   BTRFS_UUID_SIZE);
		device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
		BUG_ON(!device); /* Logic error */

		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

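/*
 * Add a new device to a mounted filesystem: allocate the next devid,
 * insert the device item and join the device to the in-memory lists.
 * When the mounted filesystem is a seed, this sprouts a new writable
 * filesystem with a fresh fsid on top of it.
 */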
btrfs_init_new_device(struct btrfs_root * root,char * device_path)1631 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1632 {
1633 struct request_queue *q;
1634 struct btrfs_trans_handle *trans;
1635 struct btrfs_device *device;
1636 struct block_device *bdev;
1637 struct list_head *devices;
1638 struct super_block *sb = root->fs_info->sb;
1639 u64 total_bytes;
1640 int seeding_dev = 0;
1641 int ret = 0;
1642
1643 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1644 return -EINVAL;
1645
1646 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1647 root->fs_info->bdev_holder);
1648 if (IS_ERR(bdev))
1649 return PTR_ERR(bdev);
1650
1651 if (root->fs_info->fs_devices->seeding) {
1652 seeding_dev = 1;
1653 down_write(&sb->s_umount);
1654 mutex_lock(&uuid_mutex);
1655 }
1656
1657 filemap_write_and_wait(bdev->bd_inode->i_mapping);
1658
1659 devices = &root->fs_info->fs_devices->devices;
1660 /*
1661 * we have the volume lock, so we don't need the extra
1662 * device list mutex while reading the list here.
1663 */
1664 list_for_each_entry(device, devices, dev_list) {
1665 if (device->bdev == bdev) {
1666 ret = -EEXIST;
1667 goto error;
1668 }
1669 }
1670
1671 device = kzalloc(sizeof(*device), GFP_NOFS);
1672 if (!device) {
1673 /* we can safely leave the fs_devices entry around */
1674 ret = -ENOMEM;
1675 goto error;
1676 }
1677
1678 device->name = kstrdup(device_path, GFP_NOFS);
1679 if (!device->name) {
1680 kfree(device);
1681 ret = -ENOMEM;
1682 goto error;
1683 }
1684
1685 ret = find_next_devid(root, &device->devid);
1686 if (ret) {
1687 kfree(device->name);
1688 kfree(device);
1689 goto error;
1690 }
1691
1692 trans = btrfs_start_transaction(root, 0);
1693 if (IS_ERR(trans)) {
1694 kfree(device->name);
1695 kfree(device);
1696 ret = PTR_ERR(trans);
1697 goto error;
1698 }
1699
1700 lock_chunks(root);
1701
1702 q = bdev_get_queue(bdev);
1703 if (blk_queue_discard(q))
1704 device->can_discard = 1;
1705 device->writeable = 1;
1706 device->work.func = pending_bios_fn;
1707 generate_random_uuid(device->uuid);
1708 spin_lock_init(&device->io_lock);
1709 device->generation = trans->transid;
1710 device->io_width = root->sectorsize;
1711 device->io_align = root->sectorsize;
1712 device->sector_size = root->sectorsize;
1713 device->total_bytes = i_size_read(bdev->bd_inode);
1714 device->disk_total_bytes = device->total_bytes;
1715 device->dev_root = root->fs_info->dev_root;
1716 device->bdev = bdev;
1717 device->in_fs_metadata = 1;
1718 device->mode = FMODE_EXCL;
1719 set_blocksize(device->bdev, 4096);
1720
1721 if (seeding_dev) {
1722 sb->s_flags &= ~MS_RDONLY;
1723 ret = btrfs_prepare_sprout(root);
1724 BUG_ON(ret); /* -ENOMEM */
1725 }
1726
1727 device->fs_devices = root->fs_info->fs_devices;
1728
1729 /*
1730 * we don't want write_supers to jump in here with our device
1731 * half setup
1732 */
1733 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
1734 list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
1735 list_add(&device->dev_alloc_list,
1736 &root->fs_info->fs_devices->alloc_list);
1737 root->fs_info->fs_devices->num_devices++;
1738 root->fs_info->fs_devices->open_devices++;
1739 root->fs_info->fs_devices->rw_devices++;
1740 if (device->can_discard)
1741 root->fs_info->fs_devices->num_can_discard++;
1742 root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
1743
1744 spin_lock(&root->fs_info->free_chunk_lock);
1745 root->fs_info->free_chunk_space += device->total_bytes;
1746 spin_unlock(&root->fs_info->free_chunk_lock);
1747
1748 if (!blk_queue_nonrot(bdev_get_queue(bdev)))
1749 root->fs_info->fs_devices->rotating = 1;
1750
1751 total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
1752 btrfs_set_super_total_bytes(root->fs_info->super_copy,
1753 total_bytes + device->total_bytes);
1754
1755 total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
1756 btrfs_set_super_num_devices(root->fs_info->super_copy,
1757 total_bytes + 1);
1758 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
1759
1760 if (seeding_dev) {
1761 ret = init_first_rw_device(trans, root, device);
1762 if (ret)
1763 goto error_trans;
1764 ret = btrfs_finish_sprout(trans, root);
1765 if (ret)
1766 goto error_trans;
1767 } else {
1768 ret = btrfs_add_device(trans, root, device);
1769 if (ret)
1770 goto error_trans;
1771 }
1772
1773 /*
1774 * we've got more storage, clear any full flags on the space
1775 * infos
1776 */
1777 btrfs_clear_space_info_full(root->fs_info);
1778
1779 unlock_chunks(root);
1780 ret = btrfs_commit_transaction(trans, root);
1781
1782 if (seeding_dev) {
1783 mutex_unlock(&uuid_mutex);
1784 up_write(&sb->s_umount);
1785
1786 if (ret) /* transaction commit */
1787 return ret;
1788
1789 ret = btrfs_relocate_sys_chunks(root);
1790 if (ret < 0)
1791 btrfs_error(root->fs_info, ret,
1792 "Failed to relocate sys chunks after "
1793 "device initialization. This can be fixed "
1794 "using the \"btrfs balance\" command.");
1795 }
1796
1797 return ret;
1798
1799 error_trans:
1800 unlock_chunks(root);
1801 btrfs_abort_transaction(trans, root, ret);
1802 btrfs_end_transaction(trans, root);
1803 kfree(device->name);
1804 kfree(device);
1805 error:
1806 blkdev_put(bdev, FMODE_EXCL);
1807 if (seeding_dev) {
1808 mutex_unlock(&uuid_mutex);
1809 up_write(&sb->s_umount);
1810 }
1811 return ret;
1812 }
1813
btrfs_update_device(struct btrfs_trans_handle * trans,struct btrfs_device * device)1814 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
1815 struct btrfs_device *device)
1816 {
1817 int ret;
1818 struct btrfs_path *path;
1819 struct btrfs_root *root;
1820 struct btrfs_dev_item *dev_item;
1821 struct extent_buffer *leaf;
1822 struct btrfs_key key;
1823
1824 root = device->dev_root->fs_info->chunk_root;
1825
1826 path = btrfs_alloc_path();
1827 if (!path)
1828 return -ENOMEM;
1829
1830 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1831 key.type = BTRFS_DEV_ITEM_KEY;
1832 key.offset = device->devid;
1833
1834 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1835 if (ret < 0)
1836 goto out;
1837
1838 if (ret > 0) {
1839 ret = -ENOENT;
1840 goto out;
1841 }
1842
1843 leaf = path->nodes[0];
1844 dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1845
1846 btrfs_set_device_id(leaf, dev_item, device->devid);
1847 btrfs_set_device_type(leaf, dev_item, device->type);
1848 btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1849 btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1850 btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1851 btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
1852 btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1853 btrfs_mark_buffer_dirty(leaf);
1854
1855 out:
1856 btrfs_free_path(path);
1857 return ret;
1858 }
1859
__btrfs_grow_device(struct btrfs_trans_handle * trans,struct btrfs_device * device,u64 new_size)1860 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
1861 struct btrfs_device *device, u64 new_size)
1862 {
1863 struct btrfs_super_block *super_copy =
1864 device->dev_root->fs_info->super_copy;
1865 u64 old_total = btrfs_super_total_bytes(super_copy);
1866 u64 diff = new_size - device->total_bytes;
1867
1868 if (!device->writeable)
1869 return -EACCES;
1870 if (new_size <= device->total_bytes)
1871 return -EINVAL;
1872
1873 btrfs_set_super_total_bytes(super_copy, old_total + diff);
1874 device->fs_devices->total_rw_bytes += diff;
1875
1876 device->total_bytes = new_size;
1877 device->disk_total_bytes = new_size;
1878 btrfs_clear_space_info_full(device->dev_root->fs_info);
1879
1880 return btrfs_update_device(trans, device);
1881 }
1882
btrfs_grow_device(struct btrfs_trans_handle * trans,struct btrfs_device * device,u64 new_size)1883 int btrfs_grow_device(struct btrfs_trans_handle *trans,
1884 struct btrfs_device *device, u64 new_size)
1885 {
1886 int ret;
1887 lock_chunks(device->dev_root);
1888 ret = __btrfs_grow_device(trans, device, new_size);
1889 unlock_chunks(device->dev_root);
1890 return ret;
1891 }
1892
btrfs_free_chunk(struct btrfs_trans_handle * trans,struct btrfs_root * root,u64 chunk_tree,u64 chunk_objectid,u64 chunk_offset)1893 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1894 struct btrfs_root *root,
1895 u64 chunk_tree, u64 chunk_objectid,
1896 u64 chunk_offset)
1897 {
1898 int ret;
1899 struct btrfs_path *path;
1900 struct btrfs_key key;
1901
1902 root = root->fs_info->chunk_root;
1903 path = btrfs_alloc_path();
1904 if (!path)
1905 return -ENOMEM;
1906
1907 key.objectid = chunk_objectid;
1908 key.offset = chunk_offset;
1909 key.type = BTRFS_CHUNK_ITEM_KEY;
1910
1911 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1912 if (ret < 0)
1913 goto out;
1914 else if (ret > 0) { /* Logic error or corruption */
1915 btrfs_error(root->fs_info, -ENOENT,
1916 "Failed lookup while freeing chunk.");
1917 ret = -ENOENT;
1918 goto out;
1919 }
1920
1921 ret = btrfs_del_item(trans, root, path);
1922 if (ret < 0)
1923 btrfs_error(root->fs_info, ret,
1924 "Failed to delete chunk item.");
1925 out:
1926 btrfs_free_path(path);
1927 return ret;
1928 }
1929
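/*
 * Remove the matching chunk from the superblock's sys_chunk_array by
 * memmove()ing the remainder of the array over it and shrinking the
 * recorded array size.
 */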
1930 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
1931 u64 chunk_offset)
1932 {
1933 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
1934 struct btrfs_disk_key *disk_key;
1935 struct btrfs_chunk *chunk;
1936 u8 *ptr;
1937 int ret = 0;
1938 u32 num_stripes;
1939 u32 array_size;
1940 u32 len = 0;
1941 u32 cur;
1942 struct btrfs_key key;
1943
1944 array_size = btrfs_super_sys_array_size(super_copy);
1945
1946 ptr = super_copy->sys_chunk_array;
1947 cur = 0;
1948
1949 while (cur < array_size) {
1950 disk_key = (struct btrfs_disk_key *)ptr;
1951 btrfs_disk_key_to_cpu(&key, disk_key);
1952
1953 len = sizeof(*disk_key);
1954
1955 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1956 chunk = (struct btrfs_chunk *)(ptr + len);
1957 num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1958 len += btrfs_chunk_item_size(num_stripes);
1959 } else {
1960 ret = -EIO;
1961 break;
1962 }
1963 if (key.objectid == chunk_objectid &&
1964 key.offset == chunk_offset) {
1965 memmove(ptr, ptr + len, array_size - (cur + len));
1966 array_size -= len;
1967 btrfs_set_super_sys_array_size(super_copy, array_size);
1968 } else {
1969 ptr += len;
1970 cur += len;
1971 }
1972 }
1973 return ret;
1974 }
1975
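/*
 * Relocate a single chunk: first move all extents it holds into other
 * chunks, then delete its device extents, its chunk tree entry, its
 * block group and (for system chunks) its sys_chunk_array copy.
 */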
1976 static int btrfs_relocate_chunk(struct btrfs_root *root,
1977 u64 chunk_tree, u64 chunk_objectid,
1978 u64 chunk_offset)
1979 {
1980 struct extent_map_tree *em_tree;
1981 struct btrfs_root *extent_root;
1982 struct btrfs_trans_handle *trans;
1983 struct extent_map *em;
1984 struct map_lookup *map;
1985 int ret;
1986 int i;
1987
1988 root = root->fs_info->chunk_root;
1989 extent_root = root->fs_info->extent_root;
1990 em_tree = &root->fs_info->mapping_tree.map_tree;
1991
1992 ret = btrfs_can_relocate(extent_root, chunk_offset);
1993 if (ret)
1994 return -ENOSPC;
1995
1996 /* step one, relocate all the extents inside this chunk */
1997 ret = btrfs_relocate_block_group(extent_root, chunk_offset);
1998 if (ret)
1999 return ret;
2000
2001 trans = btrfs_start_transaction(root, 0);
2002 BUG_ON(IS_ERR(trans));
2003
2004 lock_chunks(root);
2005
2006 /*
2007 * step two, delete the device extents and the
2008 * chunk tree entries
2009 */
2010 read_lock(&em_tree->lock);
2011 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
2012 read_unlock(&em_tree->lock);
2013
2014 BUG_ON(!em || em->start > chunk_offset ||
2015 em->start + em->len < chunk_offset);
2016 map = (struct map_lookup *)em->bdev;
2017
2018 for (i = 0; i < map->num_stripes; i++) {
2019 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
2020 map->stripes[i].physical);
2021 BUG_ON(ret);
2022
2023 if (map->stripes[i].dev) {
2024 ret = btrfs_update_device(trans, map->stripes[i].dev);
2025 BUG_ON(ret);
2026 }
2027 }
2028 ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
2029 chunk_offset);
2030
2031 BUG_ON(ret);
2032
2033 trace_btrfs_chunk_free(root, map, chunk_offset, em->len);
2034
2035 if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2036 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
2037 BUG_ON(ret);
2038 }
2039
2040 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
2041 BUG_ON(ret);
2042
2043 write_lock(&em_tree->lock);
2044 remove_extent_mapping(em_tree, em);
2045 write_unlock(&em_tree->lock);
2046
2047 kfree(map);
2048 em->bdev = NULL;
2049
2050 /* once for the tree */
2051 free_extent_map(em);
2052 /* once for us */
2053 free_extent_map(em);
2054
2055 unlock_chunks(root);
2056 btrfs_end_transaction(trans, root);
2057 return 0;
2058 }
2059
2060 static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
2061 {
2062 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
2063 struct btrfs_path *path;
2064 struct extent_buffer *leaf;
2065 struct btrfs_chunk *chunk;
2066 struct btrfs_key key;
2067 struct btrfs_key found_key;
2068 u64 chunk_tree = chunk_root->root_key.objectid;
2069 u64 chunk_type;
2070 bool retried = false;
2071 int failed = 0;
2072 int ret;
2073
2074 path = btrfs_alloc_path();
2075 if (!path)
2076 return -ENOMEM;
2077
2078 again:
2079 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2080 key.offset = (u64)-1;
2081 key.type = BTRFS_CHUNK_ITEM_KEY;
2082
2083 while (1) {
2084 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2085 if (ret < 0)
2086 goto error;
2087 BUG_ON(ret == 0); /* Corruption */
2088
2089 ret = btrfs_previous_item(chunk_root, path, key.objectid,
2090 key.type);
2091 if (ret < 0)
2092 goto error;
2093 if (ret > 0)
2094 break;
2095
2096 leaf = path->nodes[0];
2097 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2098
2099 chunk = btrfs_item_ptr(leaf, path->slots[0],
2100 struct btrfs_chunk);
2101 chunk_type = btrfs_chunk_type(leaf, chunk);
2102 btrfs_release_path(path);
2103
2104 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
2105 ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
2106 found_key.objectid,
2107 found_key.offset);
2108 if (ret == -ENOSPC)
2109 failed++;
2110 else if (ret)
2111 BUG();
2112 }
2113
2114 if (found_key.offset == 0)
2115 break;
2116 key.offset = found_key.offset - 1;
2117 }
2118 ret = 0;
2119 if (failed && !retried) {
2120 failed = 0;
2121 retried = true;
2122 goto again;
2123 } else if (failed && retried) {
2124 WARN_ON(1);
2125 ret = -ENOSPC;
2126 }
2127 error:
2128 btrfs_free_path(path);
2129 return ret;
2130 }
2131
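/*
 * Persist the restriper state: commit a balance item describing @bctl
 * into the tree root, so an interrupted balance can be picked up again
 * by btrfs_recover_balance() on the next mount.
 */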
2132 static int insert_balance_item(struct btrfs_root *root,
2133 struct btrfs_balance_control *bctl)
2134 {
2135 struct btrfs_trans_handle *trans;
2136 struct btrfs_balance_item *item;
2137 struct btrfs_disk_balance_args disk_bargs;
2138 struct btrfs_path *path;
2139 struct extent_buffer *leaf;
2140 struct btrfs_key key;
2141 int ret, err;
2142
2143 path = btrfs_alloc_path();
2144 if (!path)
2145 return -ENOMEM;
2146
2147 trans = btrfs_start_transaction(root, 0);
2148 if (IS_ERR(trans)) {
2149 btrfs_free_path(path);
2150 return PTR_ERR(trans);
2151 }
2152
2153 key.objectid = BTRFS_BALANCE_OBJECTID;
2154 key.type = BTRFS_BALANCE_ITEM_KEY;
2155 key.offset = 0;
2156
2157 ret = btrfs_insert_empty_item(trans, root, path, &key,
2158 sizeof(*item));
2159 if (ret)
2160 goto out;
2161
2162 leaf = path->nodes[0];
2163 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2164
2165 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
2166
2167 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
2168 btrfs_set_balance_data(leaf, item, &disk_bargs);
2169 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
2170 btrfs_set_balance_meta(leaf, item, &disk_bargs);
2171 btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
2172 btrfs_set_balance_sys(leaf, item, &disk_bargs);
2173
2174 btrfs_set_balance_flags(leaf, item, bctl->flags);
2175
2176 btrfs_mark_buffer_dirty(leaf);
2177 out:
2178 btrfs_free_path(path);
2179 err = btrfs_commit_transaction(trans, root);
2180 if (err && !ret)
2181 ret = err;
2182 return ret;
2183 }
2184
2185 static int del_balance_item(struct btrfs_root *root)
2186 {
2187 struct btrfs_trans_handle *trans;
2188 struct btrfs_path *path;
2189 struct btrfs_key key;
2190 int ret, err;
2191
2192 path = btrfs_alloc_path();
2193 if (!path)
2194 return -ENOMEM;
2195
2196 trans = btrfs_start_transaction(root, 0);
2197 if (IS_ERR(trans)) {
2198 btrfs_free_path(path);
2199 return PTR_ERR(trans);
2200 }
2201
2202 key.objectid = BTRFS_BALANCE_OBJECTID;
2203 key.type = BTRFS_BALANCE_ITEM_KEY;
2204 key.offset = 0;
2205
2206 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2207 if (ret < 0)
2208 goto out;
2209 if (ret > 0) {
2210 ret = -ENOENT;
2211 goto out;
2212 }
2213
2214 ret = btrfs_del_item(trans, root, path);
2215 out:
2216 btrfs_free_path(path);
2217 err = btrfs_commit_transaction(trans, root);
2218 if (err && !ret)
2219 ret = err;
2220 return ret;
2221 }
2222
2223 /*
2224 * This is a heuristic used to reduce the number of chunks balanced on
2225 * resume after balance was interrupted.
2226 */
2227 static void update_balance_args(struct btrfs_balance_control *bctl)
2228 {
2229 /*
2230 * Turn on soft mode for chunk types that were being converted.
2231 */
2232 if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
2233 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
2234 if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
2235 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
2236 if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
2237 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
2238
2239 /*
2240 * Turn on usage filter if it is not already used. The idea is
2241 * that chunks that we have already balanced should be
2242 * reasonably full. Don't do it for chunks that are being
2243 * converted - that will keep us from relocating unconverted
2244 * (albeit full) chunks.
2245 */
2246 if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2247 !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2248 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
2249 bctl->data.usage = 90;
2250 }
2251 if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2252 !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2253 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
2254 bctl->sys.usage = 90;
2255 }
2256 if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
2257 !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
2258 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
2259 bctl->meta.usage = 90;
2260 }
2261 }
2262
2263 /*
2264 * Should be called with both balance and volume mutexes held to
2265 * serialize other volume operations (add_dev/rm_dev/resize) with
2266 * restriper. Same goes for unset_balance_control.
2267 */
2268 static void set_balance_control(struct btrfs_balance_control *bctl)
2269 {
2270 struct btrfs_fs_info *fs_info = bctl->fs_info;
2271
2272 BUG_ON(fs_info->balance_ctl);
2273
2274 spin_lock(&fs_info->balance_lock);
2275 fs_info->balance_ctl = bctl;
2276 spin_unlock(&fs_info->balance_lock);
2277 }
2278
2279 static void unset_balance_control(struct btrfs_fs_info *fs_info)
2280 {
2281 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2282
2283 BUG_ON(!fs_info->balance_ctl);
2284
2285 spin_lock(&fs_info->balance_lock);
2286 fs_info->balance_ctl = NULL;
2287 spin_unlock(&fs_info->balance_lock);
2288
2289 kfree(bctl);
2290 }
2291
2292 /*
2293 * Balance filters. Return 1 if chunk should be filtered out
2294 * (should not be balanced).
2295 */
2296 static int chunk_profiles_filter(u64 chunk_type,
2297 struct btrfs_balance_args *bargs)
2298 {
2299 chunk_type = chunk_to_extended(chunk_type) &
2300 BTRFS_EXTENDED_PROFILE_MASK;
2301
2302 if (bargs->profiles & chunk_type)
2303 return 0;
2304
2305 return 1;
2306 }
2307
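/* Return @factor percent of @num, e.g. div_factor_fine(1000, 90) == 900. */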
2308 static u64 div_factor_fine(u64 num, int factor)
2309 {
2310 if (factor <= 0)
2311 return 0;
2312 if (factor >= 100)
2313 return num;
2314
2315 num *= factor;
2316 do_div(num, 100);
2317 return num;
2318 }
2319
2320 static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
2321 struct btrfs_balance_args *bargs)
2322 {
2323 struct btrfs_block_group_cache *cache;
2324 u64 chunk_used, user_thresh;
2325 int ret = 1;
2326
2327 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2328 chunk_used = btrfs_block_group_used(&cache->item);
2329
2330 user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
2331 if (chunk_used < user_thresh)
2332 ret = 0;
2333
2334 btrfs_put_block_group(cache);
2335 return ret;
2336 }
2337
2338 static int chunk_devid_filter(struct extent_buffer *leaf,
2339 struct btrfs_chunk *chunk,
2340 struct btrfs_balance_args *bargs)
2341 {
2342 struct btrfs_stripe *stripe;
2343 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2344 int i;
2345
2346 for (i = 0; i < num_stripes; i++) {
2347 stripe = btrfs_stripe_nr(chunk, i);
2348 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
2349 return 0;
2350 }
2351
2352 return 1;
2353 }
2354
2355 /* [pstart, pend) */
2356 static int chunk_drange_filter(struct extent_buffer *leaf,
2357 struct btrfs_chunk *chunk,
2358 u64 chunk_offset,
2359 struct btrfs_balance_args *bargs)
2360 {
2361 struct btrfs_stripe *stripe;
2362 int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2363 u64 stripe_offset;
2364 u64 stripe_length;
2365 int factor;
2366 int i;
2367
2368 if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
2369 return 0;
2370
2371 if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
2372 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
2373 factor = 2;
2374 else
2375 factor = 1;
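		/*
		 * After the division below, factor is the number of data
		 * stripes per copy, so each device extent of this chunk
		 * covers chunk_length / factor bytes.
		 */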
2376 factor = num_stripes / factor;
2377
2378 for (i = 0; i < num_stripes; i++) {
2379 stripe = btrfs_stripe_nr(chunk, i);
2380 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
2381 continue;
2382
2383 stripe_offset = btrfs_stripe_offset(leaf, stripe);
2384 stripe_length = btrfs_chunk_length(leaf, chunk);
2385 do_div(stripe_length, factor);
2386
2387 if (stripe_offset < bargs->pend &&
2388 stripe_offset + stripe_length > bargs->pstart)
2389 return 0;
2390 }
2391
2392 return 1;
2393 }
2394
2395 /* [vstart, vend) */
2396 static int chunk_vrange_filter(struct extent_buffer *leaf,
2397 struct btrfs_chunk *chunk,
2398 u64 chunk_offset,
2399 struct btrfs_balance_args *bargs)
2400 {
2401 if (chunk_offset < bargs->vend &&
2402 chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
2403 /* at least part of the chunk is inside this vrange */
2404 return 0;
2405
2406 return 1;
2407 }
2408
2409 static int chunk_soft_convert_filter(u64 chunk_type,
2410 struct btrfs_balance_args *bargs)
2411 {
2412 if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
2413 return 0;
2414
2415 chunk_type = chunk_to_extended(chunk_type) &
2416 BTRFS_EXTENDED_PROFILE_MASK;
2417
2418 if (bargs->target == chunk_type)
2419 return 1;
2420
2421 return 0;
2422 }
2423
2424 static int should_balance_chunk(struct btrfs_root *root,
2425 struct extent_buffer *leaf,
2426 struct btrfs_chunk *chunk, u64 chunk_offset)
2427 {
2428 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
2429 struct btrfs_balance_args *bargs = NULL;
2430 u64 chunk_type = btrfs_chunk_type(leaf, chunk);
2431
2432 /* type filter */
2433 if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
2434 (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
2435 return 0;
2436 }
2437
2438 if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
2439 bargs = &bctl->data;
2440 else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
2441 bargs = &bctl->sys;
2442 else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
2443 bargs = &bctl->meta;
2444
2445 /* profiles filter */
2446 if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
2447 chunk_profiles_filter(chunk_type, bargs)) {
2448 return 0;
2449 }
2450
2451 /* usage filter */
2452 if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
2453 chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
2454 return 0;
2455 }
2456
2457 /* devid filter */
2458 if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
2459 chunk_devid_filter(leaf, chunk, bargs)) {
2460 return 0;
2461 }
2462
2463 /* drange filter, makes sense only with devid filter */
2464 if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
2465 chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
2466 return 0;
2467 }
2468
2469 /* vrange filter */
2470 if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
2471 chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
2472 return 0;
2473 }
2474
2475 /* soft profile changing mode */
2476 if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
2477 chunk_soft_convert_filter(chunk_type, bargs)) {
2478 return 0;
2479 }
2480
2481 return 1;
2482 }
2483
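/* Return @factor tenths of @num, e.g. div_factor(100, 8) == 80. */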
2484 static u64 div_factor(u64 num, int factor)
2485 {
2486 if (factor == 10)
2487 return num;
2488 num *= factor;
2489 do_div(num, 10);
2490 return num;
2491 }
2492
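/*
 * The main balance loop. It walks the chunk tree twice: a counting
 * pass that only tallies the chunks matching the filters into
 * bctl->stat.expected, then a relocation pass that actually moves
 * them, checking for cancel (and pause) requests between chunks.
 */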
2493 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
2494 {
2495 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
2496 struct btrfs_root *chunk_root = fs_info->chunk_root;
2497 struct btrfs_root *dev_root = fs_info->dev_root;
2498 struct list_head *devices;
2499 struct btrfs_device *device;
2500 u64 old_size;
2501 u64 size_to_free;
2502 struct btrfs_chunk *chunk;
2503 struct btrfs_path *path;
2504 struct btrfs_key key;
2505 struct btrfs_key found_key;
2506 struct btrfs_trans_handle *trans;
2507 struct extent_buffer *leaf;
2508 int slot;
2509 int ret;
2510 int enospc_errors = 0;
2511 bool counting = true;
2512
2513 /* step one, make some room on all the devices */
2514 devices = &fs_info->fs_devices->devices;
2515 list_for_each_entry(device, devices, dev_list) {
2516 old_size = device->total_bytes;
2517 size_to_free = div_factor(old_size, 1);
2518 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
2519 if (!device->writeable ||
2520 device->total_bytes - device->bytes_used > size_to_free)
2521 continue;
2522
2523 ret = btrfs_shrink_device(device, old_size - size_to_free);
2524 if (ret == -ENOSPC)
2525 break;
2526 BUG_ON(ret);
2527
2528 trans = btrfs_start_transaction(dev_root, 0);
2529 BUG_ON(IS_ERR(trans));
2530
2531 ret = btrfs_grow_device(trans, device, old_size);
2532 BUG_ON(ret);
2533
2534 btrfs_end_transaction(trans, dev_root);
2535 }
2536
2537 /* step two, relocate all the chunks */
2538 path = btrfs_alloc_path();
2539 if (!path) {
2540 ret = -ENOMEM;
2541 goto error;
2542 }
2543
2544 /* zero out stat counters */
2545 spin_lock(&fs_info->balance_lock);
2546 memset(&bctl->stat, 0, sizeof(bctl->stat));
2547 spin_unlock(&fs_info->balance_lock);
2548 again:
2549 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2550 key.offset = (u64)-1;
2551 key.type = BTRFS_CHUNK_ITEM_KEY;
2552
2553 while (1) {
2554 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
2555 atomic_read(&fs_info->balance_cancel_req)) {
2556 ret = -ECANCELED;
2557 goto error;
2558 }
2559
2560 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
2561 if (ret < 0)
2562 goto error;
2563
2564 /*
2565 * this shouldn't happen, it means the last relocate
2566 * failed
2567 */
2568 if (ret == 0)
2569 BUG(); /* FIXME break ? */
2570
2571 ret = btrfs_previous_item(chunk_root, path, 0,
2572 BTRFS_CHUNK_ITEM_KEY);
2573 if (ret) {
2574 ret = 0;
2575 break;
2576 }
2577
2578 leaf = path->nodes[0];
2579 slot = path->slots[0];
2580 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2581
2582 if (found_key.objectid != key.objectid)
2583 break;
2584
2585 /* chunk zero is special */
2586 if (found_key.offset == 0)
2587 break;
2588
2589 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2590
2591 if (!counting) {
2592 spin_lock(&fs_info->balance_lock);
2593 bctl->stat.considered++;
2594 spin_unlock(&fs_info->balance_lock);
2595 }
2596
2597 ret = should_balance_chunk(chunk_root, leaf, chunk,
2598 found_key.offset);
2599 btrfs_release_path(path);
2600 if (!ret)
2601 goto loop;
2602
2603 if (counting) {
2604 spin_lock(&fs_info->balance_lock);
2605 bctl->stat.expected++;
2606 spin_unlock(&fs_info->balance_lock);
2607 goto loop;
2608 }
2609
2610 ret = btrfs_relocate_chunk(chunk_root,
2611 chunk_root->root_key.objectid,
2612 found_key.objectid,
2613 found_key.offset);
2614 if (ret && ret != -ENOSPC)
2615 goto error;
2616 if (ret == -ENOSPC) {
2617 enospc_errors++;
2618 } else {
2619 spin_lock(&fs_info->balance_lock);
2620 bctl->stat.completed++;
2621 spin_unlock(&fs_info->balance_lock);
2622 }
2623 loop:
2624 key.offset = found_key.offset - 1;
2625 }
2626
2627 if (counting) {
2628 btrfs_release_path(path);
2629 counting = false;
2630 goto again;
2631 }
2632 error:
2633 btrfs_free_path(path);
2634 if (enospc_errors) {
2635 printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
2636 enospc_errors);
2637 if (!ret)
2638 ret = -ENOSPC;
2639 }
2640
2641 return ret;
2642 }
2643
2644 /**
2645 * alloc_profile_is_valid - see if a given profile is valid and reduced
2646 * @flags: profile to validate
2647 * @extended: if true @flags is treated as an extended profile
2648 */
2649 static int alloc_profile_is_valid(u64 flags, int extended)
2650 {
2651 u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
2652 BTRFS_BLOCK_GROUP_PROFILE_MASK);
2653
2654 flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
2655
2656 /* 1) check that all other bits are zeroed */
2657 if (flags & ~mask)
2658 return 0;
2659
2660 /* 2) see if profile is reduced */
2661 if (flags == 0)
2662 return !extended; /* "0" is valid for usual profiles */
2663
2664 /* true if exactly one bit set */
2665 return (flags & (flags - 1)) == 0;
2666 }
2667
2668 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
2669 {
2670 /* cancel requested || normal exit path */
2671 return atomic_read(&fs_info->balance_cancel_req) ||
2672 (atomic_read(&fs_info->balance_pause_req) == 0 &&
2673 atomic_read(&fs_info->balance_cancel_req) == 0);
2674 }
2675
2676 static void __cancel_balance(struct btrfs_fs_info *fs_info)
2677 {
2678 int ret;
2679
2680 unset_balance_control(fs_info);
2681 ret = del_balance_item(fs_info->tree_root);
2682 BUG_ON(ret);
2683 }
2684
2685 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
2686 struct btrfs_ioctl_balance_args *bargs);
2687
2688 /*
2689 * Should be called with both balance and volume mutexes held
2690 */
2691 int btrfs_balance(struct btrfs_balance_control *bctl,
2692 struct btrfs_ioctl_balance_args *bargs)
2693 {
2694 struct btrfs_fs_info *fs_info = bctl->fs_info;
2695 u64 allowed;
2696 int mixed = 0;
2697 int ret;
2698
2699 if (btrfs_fs_closing(fs_info) ||
2700 atomic_read(&fs_info->balance_pause_req) ||
2701 atomic_read(&fs_info->balance_cancel_req)) {
2702 ret = -EINVAL;
2703 goto out;
2704 }
2705
2706 allowed = btrfs_super_incompat_flags(fs_info->super_copy);
2707 if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
2708 mixed = 1;
2709
2710 /*
2711 * In case of mixed groups both data and meta should be picked,
2712 * and identical options should be given for both of them.
2713 */
2714 allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
2715 if (mixed && (bctl->flags & allowed)) {
2716 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
2717 !(bctl->flags & BTRFS_BALANCE_METADATA) ||
2718 memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
2719 printk(KERN_ERR "btrfs: with mixed groups data and "
2720 "metadata balance options must be the same\n");
2721 ret = -EINVAL;
2722 goto out;
2723 }
2724 }
2725
2726 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
2727 if (fs_info->fs_devices->num_devices == 1)
2728 allowed |= BTRFS_BLOCK_GROUP_DUP;
2729 else if (fs_info->fs_devices->num_devices < 4)
2730 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
2731 else
2732 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
2733 BTRFS_BLOCK_GROUP_RAID10);
2734
2735 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2736 (!alloc_profile_is_valid(bctl->data.target, 1) ||
2737 (bctl->data.target & ~allowed))) {
2738 printk(KERN_ERR "btrfs: unable to start balance with target "
2739 "data profile %llu\n",
2740 (unsigned long long)bctl->data.target);
2741 ret = -EINVAL;
2742 goto out;
2743 }
2744 if ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2745 (!alloc_profile_is_valid(bctl->meta.target, 1) ||
2746 (bctl->meta.target & ~allowed))) {
2747 printk(KERN_ERR "btrfs: unable to start balance with target "
2748 "metadata profile %llu\n",
2749 (unsigned long long)bctl->meta.target);
2750 ret = -EINVAL;
2751 goto out;
2752 }
2753 if ((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2754 (!alloc_profile_is_valid(bctl->sys.target, 1) ||
2755 (bctl->sys.target & ~allowed))) {
2756 printk(KERN_ERR "btrfs: unable to start balance with target "
2757 "system profile %llu\n",
2758 (unsigned long long)bctl->sys.target);
2759 ret = -EINVAL;
2760 goto out;
2761 }
2762
2763 /* allow dup'ed data chunks only in mixed mode */
2764 if (!mixed && (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2765 (bctl->data.target & BTRFS_BLOCK_GROUP_DUP)) {
2766 printk(KERN_ERR "btrfs: dup for data is not allowed\n");
2767 ret = -EINVAL;
2768 goto out;
2769 }
2770
2771 /* allow to reduce meta or sys integrity only if force set */
2772 allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2773 BTRFS_BLOCK_GROUP_RAID10;
2774 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2775 (fs_info->avail_system_alloc_bits & allowed) &&
2776 !(bctl->sys.target & allowed)) ||
2777 ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
2778 (fs_info->avail_metadata_alloc_bits & allowed) &&
2779 !(bctl->meta.target & allowed))) {
2780 if (bctl->flags & BTRFS_BALANCE_FORCE) {
2781 printk(KERN_INFO "btrfs: force reducing metadata "
2782 "integrity\n");
2783 } else {
2784 printk(KERN_ERR "btrfs: balance will reduce metadata "
2785 "integrity, use force if you want this\n");
2786 ret = -EINVAL;
2787 goto out;
2788 }
2789 }
2790
2791 ret = insert_balance_item(fs_info->tree_root, bctl);
2792 if (ret && ret != -EEXIST)
2793 goto out;
2794
2795 if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
2796 BUG_ON(ret == -EEXIST);
2797 set_balance_control(bctl);
2798 } else {
2799 BUG_ON(ret != -EEXIST);
2800 spin_lock(&fs_info->balance_lock);
2801 update_balance_args(bctl);
2802 spin_unlock(&fs_info->balance_lock);
2803 }
2804
2805 atomic_inc(&fs_info->balance_running);
2806 mutex_unlock(&fs_info->balance_mutex);
2807
2808 ret = __btrfs_balance(fs_info);
2809
2810 mutex_lock(&fs_info->balance_mutex);
2811 atomic_dec(&fs_info->balance_running);
2812
2813 if (bargs) {
2814 memset(bargs, 0, sizeof(*bargs));
2815 update_ioctl_balance_args(fs_info, 0, bargs);
2816 }
2817
2818 if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
2819 balance_need_close(fs_info)) {
2820 __cancel_balance(fs_info);
2821 }
2822
2823 wake_up(&fs_info->balance_wait_q);
2824
2825 return ret;
2826 out:
2827 if (bctl->flags & BTRFS_BALANCE_RESUME)
2828 __cancel_balance(fs_info);
2829 else
2830 kfree(bctl);
2831 return ret;
2832 }
2833
2834 static int balance_kthread(void *data)
2835 {
2836 struct btrfs_balance_control *bctl =
2837 (struct btrfs_balance_control *)data;
2838 struct btrfs_fs_info *fs_info = bctl->fs_info;
2839 int ret = 0;
2840
2841 mutex_lock(&fs_info->volume_mutex);
2842 mutex_lock(&fs_info->balance_mutex);
2843
2844 set_balance_control(bctl);
2845
2846 if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
2847 printk(KERN_INFO "btrfs: force skipping balance\n");
2848 } else {
2849 printk(KERN_INFO "btrfs: continuing balance\n");
2850 ret = btrfs_balance(bctl, NULL);
2851 }
2852
2853 mutex_unlock(&fs_info->balance_mutex);
2854 mutex_unlock(&fs_info->volume_mutex);
2855 return ret;
2856 }
2857
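/*
 * Called during mount: if a balance item exists in @tree_root, rebuild
 * the balance control from it and hand it to a "btrfs-balance" kthread,
 * which resumes the balance (or only restores its state when the
 * skip_balance mount option is set).
 */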
2858 int btrfs_recover_balance(struct btrfs_root *tree_root)
2859 {
2860 struct task_struct *tsk;
2861 struct btrfs_balance_control *bctl;
2862 struct btrfs_balance_item *item;
2863 struct btrfs_disk_balance_args disk_bargs;
2864 struct btrfs_path *path;
2865 struct extent_buffer *leaf;
2866 struct btrfs_key key;
2867 int ret;
2868
2869 path = btrfs_alloc_path();
2870 if (!path)
2871 return -ENOMEM;
2872
2873 bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
2874 if (!bctl) {
2875 ret = -ENOMEM;
2876 goto out;
2877 }
2878
2879 key.objectid = BTRFS_BALANCE_OBJECTID;
2880 key.type = BTRFS_BALANCE_ITEM_KEY;
2881 key.offset = 0;
2882
2883 ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
2884 if (ret < 0)
2885 goto out_bctl;
2886 if (ret > 0) { /* ret = -ENOENT; */
2887 ret = 0;
2888 goto out_bctl;
2889 }
2890
2891 leaf = path->nodes[0];
2892 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
2893
2894 bctl->fs_info = tree_root->fs_info;
2895 bctl->flags = btrfs_balance_flags(leaf, item) | BTRFS_BALANCE_RESUME;
2896
2897 btrfs_balance_data(leaf, item, &disk_bargs);
2898 btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
2899 btrfs_balance_meta(leaf, item, &disk_bargs);
2900 btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
2901 btrfs_balance_sys(leaf, item, &disk_bargs);
2902 btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
2903
2904 tsk = kthread_run(balance_kthread, bctl, "btrfs-balance");
2905 if (IS_ERR(tsk))
2906 ret = PTR_ERR(tsk);
2907 else
2908 goto out;
2909
2910 out_bctl:
2911 kfree(bctl);
2912 out:
2913 btrfs_free_path(path);
2914 return ret;
2915 }
2916
2917 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
2918 {
2919 int ret = 0;
2920
2921 mutex_lock(&fs_info->balance_mutex);
2922 if (!fs_info->balance_ctl) {
2923 mutex_unlock(&fs_info->balance_mutex);
2924 return -ENOTCONN;
2925 }
2926
2927 if (atomic_read(&fs_info->balance_running)) {
2928 atomic_inc(&fs_info->balance_pause_req);
2929 mutex_unlock(&fs_info->balance_mutex);
2930
2931 wait_event(fs_info->balance_wait_q,
2932 atomic_read(&fs_info->balance_running) == 0);
2933
2934 mutex_lock(&fs_info->balance_mutex);
2935 /* we are good with balance_ctl ripped off from under us */
2936 BUG_ON(atomic_read(&fs_info->balance_running));
2937 atomic_dec(&fs_info->balance_pause_req);
2938 } else {
2939 ret = -ENOTCONN;
2940 }
2941
2942 mutex_unlock(&fs_info->balance_mutex);
2943 return ret;
2944 }
2945
2946 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
2947 {
2948 mutex_lock(&fs_info->balance_mutex);
2949 if (!fs_info->balance_ctl) {
2950 mutex_unlock(&fs_info->balance_mutex);
2951 return -ENOTCONN;
2952 }
2953
2954 atomic_inc(&fs_info->balance_cancel_req);
2955 /*
2956 * if balance is running, just wait and return; the balance item
2957 * is deleted in btrfs_balance() in that case
2958 */
2959 if (atomic_read(&fs_info->balance_running)) {
2960 mutex_unlock(&fs_info->balance_mutex);
2961 wait_event(fs_info->balance_wait_q,
2962 atomic_read(&fs_info->balance_running) == 0);
2963 mutex_lock(&fs_info->balance_mutex);
2964 } else {
2965 /* __cancel_balance needs volume_mutex */
2966 mutex_unlock(&fs_info->balance_mutex);
2967 mutex_lock(&fs_info->volume_mutex);
2968 mutex_lock(&fs_info->balance_mutex);
2969
2970 if (fs_info->balance_ctl)
2971 __cancel_balance(fs_info);
2972
2973 mutex_unlock(&fs_info->volume_mutex);
2974 }
2975
2976 BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
2977 atomic_dec(&fs_info->balance_cancel_req);
2978 mutex_unlock(&fs_info->balance_mutex);
2979 return 0;
2980 }
2981
2982 /*
2983 * shrinking a device means finding all of the device extents past
2984 * the new size, and then following the back refs to the chunks.
2985 * The chunk relocation code actually frees the device extent
2986 */
2987 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
2988 {
2989 struct btrfs_trans_handle *trans;
2990 struct btrfs_root *root = device->dev_root;
2991 struct btrfs_dev_extent *dev_extent = NULL;
2992 struct btrfs_path *path;
2993 u64 length;
2994 u64 chunk_tree;
2995 u64 chunk_objectid;
2996 u64 chunk_offset;
2997 int ret;
2998 int slot;
2999 int failed = 0;
3000 bool retried = false;
3001 struct extent_buffer *l;
3002 struct btrfs_key key;
3003 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3004 u64 old_total = btrfs_super_total_bytes(super_copy);
3005 u64 old_size = device->total_bytes;
3006 u64 diff = device->total_bytes - new_size;
3007
3008 if (new_size >= device->total_bytes)
3009 return -EINVAL;
3010
3011 path = btrfs_alloc_path();
3012 if (!path)
3013 return -ENOMEM;
3014
3015 path->reada = 2;
3016
3017 lock_chunks(root);
3018
3019 device->total_bytes = new_size;
3020 if (device->writeable) {
3021 device->fs_devices->total_rw_bytes -= diff;
3022 spin_lock(&root->fs_info->free_chunk_lock);
3023 root->fs_info->free_chunk_space -= diff;
3024 spin_unlock(&root->fs_info->free_chunk_lock);
3025 }
3026 unlock_chunks(root);
3027
3028 again:
3029 key.objectid = device->devid;
3030 key.offset = (u64)-1;
3031 key.type = BTRFS_DEV_EXTENT_KEY;
3032
3033 do {
3034 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3035 if (ret < 0)
3036 goto done;
3037
3038 ret = btrfs_previous_item(root, path, 0, key.type);
3039 if (ret < 0)
3040 goto done;
3041 if (ret) {
3042 ret = 0;
3043 btrfs_release_path(path);
3044 break;
3045 }
3046
3047 l = path->nodes[0];
3048 slot = path->slots[0];
3049 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
3050
3051 if (key.objectid != device->devid) {
3052 btrfs_release_path(path);
3053 break;
3054 }
3055
3056 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3057 length = btrfs_dev_extent_length(l, dev_extent);
3058
3059 if (key.offset + length <= new_size) {
3060 btrfs_release_path(path);
3061 break;
3062 }
3063
3064 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3065 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3066 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3067 btrfs_release_path(path);
3068
3069 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
3070 chunk_offset);
3071 if (ret && ret != -ENOSPC)
3072 goto done;
3073 if (ret == -ENOSPC)
3074 failed++;
3075 } while (key.offset-- > 0);
3076
3077 if (failed && !retried) {
3078 failed = 0;
3079 retried = true;
3080 goto again;
3081 } else if (failed && retried) {
3082 ret = -ENOSPC;
3083 lock_chunks(root);
3084
3085 device->total_bytes = old_size;
3086 if (device->writeable)
3087 device->fs_devices->total_rw_bytes += diff;
3088 spin_lock(&root->fs_info->free_chunk_lock);
3089 root->fs_info->free_chunk_space += diff;
3090 spin_unlock(&root->fs_info->free_chunk_lock);
3091 unlock_chunks(root);
3092 goto done;
3093 }
3094
3095 /* Shrinking succeeded, else we would be at "done". */
3096 trans = btrfs_start_transaction(root, 0);
3097 if (IS_ERR(trans)) {
3098 ret = PTR_ERR(trans);
3099 goto done;
3100 }
3101
3102 lock_chunks(root);
3103
3104 device->disk_total_bytes = new_size;
3105 /* Now btrfs_update_device() will change the on-disk size. */
3106 ret = btrfs_update_device(trans, device);
3107 if (ret) {
3108 unlock_chunks(root);
3109 btrfs_end_transaction(trans, root);
3110 goto done;
3111 }
3112 WARN_ON(diff > old_total);
3113 btrfs_set_super_total_bytes(super_copy, old_total - diff);
3114 unlock_chunks(root);
3115 btrfs_end_transaction(trans, root);
3116 done:
3117 btrfs_free_path(path);
3118 return ret;
3119 }
3120
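/*
 * Append @key and @chunk to the superblock's sys_chunk_array, so the
 * mapping for system chunks can be read before the chunk tree itself.
 * Returns -EFBIG if the array is full.
 */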
3121 static int btrfs_add_system_chunk(struct btrfs_root *root,
3122 struct btrfs_key *key,
3123 struct btrfs_chunk *chunk, int item_size)
3124 {
3125 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
3126 struct btrfs_disk_key disk_key;
3127 u32 array_size;
3128 u8 *ptr;
3129
3130 array_size = btrfs_super_sys_array_size(super_copy);
3131 if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
3132 return -EFBIG;
3133
3134 ptr = super_copy->sys_chunk_array + array_size;
3135 btrfs_cpu_key_to_disk(&disk_key, key);
3136 memcpy(ptr, &disk_key, sizeof(disk_key));
3137 ptr += sizeof(disk_key);
3138 memcpy(ptr, chunk, item_size);
3139 item_size += sizeof(disk_key);
3140 btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
3141 return 0;
3142 }
3143
3144 /*
3145 * sort the devices in descending order by max_avail, total_avail
3146 */
3147 static int btrfs_cmp_device_info(const void *a, const void *b)
3148 {
3149 const struct btrfs_device_info *di_a = a;
3150 const struct btrfs_device_info *di_b = b;
3151
3152 if (di_a->max_avail > di_b->max_avail)
3153 return -1;
3154 if (di_a->max_avail < di_b->max_avail)
3155 return 1;
3156 if (di_a->total_avail > di_b->total_avail)
3157 return -1;
3158 if (di_a->total_avail < di_b->total_avail)
3159 return 1;
3160 return 0;
3161 }
3162
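/*
 * First phase of chunk allocation for @type at logical offset @start:
 * gather the largest free hole on each writeable device, sort devices
 * by available space, size the stripes from the RAID parameters, then
 * insert the extent mapping, the block group and one dev extent per
 * stripe. The chunk item itself is written later by
 * __finish_chunk_alloc().
 */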
3163 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3164 struct btrfs_root *extent_root,
3165 struct map_lookup **map_ret,
3166 u64 *num_bytes_out, u64 *stripe_size_out,
3167 u64 start, u64 type)
3168 {
3169 struct btrfs_fs_info *info = extent_root->fs_info;
3170 struct btrfs_fs_devices *fs_devices = info->fs_devices;
3171 struct list_head *cur;
3172 struct map_lookup *map = NULL;
3173 struct extent_map_tree *em_tree;
3174 struct extent_map *em;
3175 struct btrfs_device_info *devices_info = NULL;
3176 u64 total_avail;
3177 int num_stripes; /* total number of stripes to allocate */
3178 int sub_stripes; /* sub_stripes info for map */
3179 int dev_stripes; /* stripes per dev */
3180 int devs_max; /* max devs to use */
3181 int devs_min; /* min devs needed */
3182 int devs_increment; /* ndevs has to be a multiple of this */
3183 int ncopies; /* how many copies of the data we have */
3184 int ret;
3185 u64 max_stripe_size;
3186 u64 max_chunk_size;
3187 u64 stripe_size;
3188 u64 num_bytes;
3189 int ndevs;
3190 int i;
3191 int j;
3192
3193 BUG_ON(!alloc_profile_is_valid(type, 0));
3194
3195 if (list_empty(&fs_devices->alloc_list))
3196 return -ENOSPC;
3197
3198 sub_stripes = 1;
3199 dev_stripes = 1;
3200 devs_increment = 1;
3201 ncopies = 1;
3202 devs_max = 0; /* 0 == as many as possible */
3203 devs_min = 1;
3204
3205 /*
3206 * define the properties of each RAID type.
3207 * FIXME: move this to a global table and use it in all RAID
3208 * calculation code
3209 */
3210 if (type & (BTRFS_BLOCK_GROUP_DUP)) {
3211 dev_stripes = 2;
3212 ncopies = 2;
3213 devs_max = 1;
3214 } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
3215 devs_min = 2;
3216 } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
3217 devs_increment = 2;
3218 ncopies = 2;
3219 devs_max = 2;
3220 devs_min = 2;
3221 } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
3222 sub_stripes = 2;
3223 devs_increment = 2;
3224 ncopies = 2;
3225 devs_min = 4;
3226 } else {
3227 devs_max = 1;
3228 }
3229
3230 if (type & BTRFS_BLOCK_GROUP_DATA) {
3231 max_stripe_size = 1024 * 1024 * 1024;
3232 max_chunk_size = 10 * max_stripe_size;
3233 } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
3234 /* for larger filesystems, use larger metadata chunks */
3235 if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
3236 max_stripe_size = 1024 * 1024 * 1024;
3237 else
3238 max_stripe_size = 256 * 1024 * 1024;
3239 max_chunk_size = max_stripe_size;
3240 } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
3241 max_stripe_size = 32 * 1024 * 1024;
3242 max_chunk_size = 2 * max_stripe_size;
3243 } else {
3244 printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
3245 type);
3246 BUG_ON(1);
3247 }
3248
3249 /* we don't want a chunk larger than 10% of writeable space */
3250 max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
3251 max_chunk_size);
3252
3253 devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
3254 GFP_NOFS);
3255 if (!devices_info)
3256 return -ENOMEM;
3257
3258 cur = fs_devices->alloc_list.next;
3259
3260 /*
3261 * in the first pass through the devices list, we gather information
3262 * about the available holes on each device.
3263 */
3264 ndevs = 0;
3265 while (cur != &fs_devices->alloc_list) {
3266 struct btrfs_device *device;
3267 u64 max_avail;
3268 u64 dev_offset;
3269
3270 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
3271
3272 cur = cur->next;
3273
3274 if (!device->writeable) {
3275 printk(KERN_ERR
3276 "btrfs: read-only device in alloc_list\n");
3277 WARN_ON(1);
3278 continue;
3279 }
3280
3281 if (!device->in_fs_metadata)
3282 continue;
3283
3284 if (device->total_bytes > device->bytes_used)
3285 total_avail = device->total_bytes - device->bytes_used;
3286 else
3287 total_avail = 0;
3288
3289 /* If there is no space on this device, skip it. */
3290 if (total_avail == 0)
3291 continue;
3292
3293 ret = find_free_dev_extent(device,
3294 max_stripe_size * dev_stripes,
3295 &dev_offset, &max_avail);
3296 if (ret && ret != -ENOSPC)
3297 goto error;
3298
3299 if (ret == 0)
3300 max_avail = max_stripe_size * dev_stripes;
3301
3302 if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
3303 continue;
3304
3305 devices_info[ndevs].dev_offset = dev_offset;
3306 devices_info[ndevs].max_avail = max_avail;
3307 devices_info[ndevs].total_avail = total_avail;
3308 devices_info[ndevs].dev = device;
3309 ++ndevs;
3310 }
3311
3312 /*
3313 * now sort the devices by hole size / available space
3314 */
3315 sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
3316 btrfs_cmp_device_info, NULL);
3317
3318 /* round down to number of usable stripes */
3319 ndevs -= ndevs % devs_increment;
3320
3321 if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
3322 ret = -ENOSPC;
3323 goto error;
3324 }
3325
3326 if (devs_max && ndevs > devs_max)
3327 ndevs = devs_max;
3328 /*
3329 * the primary goal is to maximize the number of stripes, so use as many
3330 * devices as possible, even if the stripes are not maximum sized.
3331 */
3332 stripe_size = devices_info[ndevs-1].max_avail;
3333 num_stripes = ndevs * dev_stripes;
3334
3335 if (stripe_size * ndevs > max_chunk_size * ncopies) {
3336 stripe_size = max_chunk_size * ncopies;
3337 do_div(stripe_size, ndevs);
3338 }
3339
3340 do_div(stripe_size, dev_stripes);
3341
3342 /* align to BTRFS_STRIPE_LEN */
3343 do_div(stripe_size, BTRFS_STRIPE_LEN);
3344 stripe_size *= BTRFS_STRIPE_LEN;
3345
3346 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
3347 if (!map) {
3348 ret = -ENOMEM;
3349 goto error;
3350 }
3351 map->num_stripes = num_stripes;
3352
3353 for (i = 0; i < ndevs; ++i) {
3354 for (j = 0; j < dev_stripes; ++j) {
3355 int s = i * dev_stripes + j;
3356 map->stripes[s].dev = devices_info[i].dev;
3357 map->stripes[s].physical = devices_info[i].dev_offset +
3358 j * stripe_size;
3359 }
3360 }
3361 map->sector_size = extent_root->sectorsize;
3362 map->stripe_len = BTRFS_STRIPE_LEN;
3363 map->io_align = BTRFS_STRIPE_LEN;
3364 map->io_width = BTRFS_STRIPE_LEN;
3365 map->type = type;
3366 map->sub_stripes = sub_stripes;
3367
3368 *map_ret = map;
3369 num_bytes = stripe_size * (num_stripes / ncopies);
3370
3371 *stripe_size_out = stripe_size;
3372 *num_bytes_out = num_bytes;
3373
3374 trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
3375
3376 em = alloc_extent_map();
3377 if (!em) {
3378 ret = -ENOMEM;
3379 goto error;
3380 }
3381 em->bdev = (struct block_device *)map;
3382 em->start = start;
3383 em->len = num_bytes;
3384 em->block_start = 0;
3385 em->block_len = em->len;
3386
3387 em_tree = &extent_root->fs_info->mapping_tree.map_tree;
3388 write_lock(&em_tree->lock);
3389 ret = add_extent_mapping(em_tree, em);
3390 write_unlock(&em_tree->lock);
3391 free_extent_map(em);
3392 if (ret)
3393 goto error;
3394
3395 ret = btrfs_make_block_group(trans, extent_root, 0, type,
3396 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3397 start, num_bytes);
3398 if (ret)
3399 goto error;
3400
3401 for (i = 0; i < map->num_stripes; ++i) {
3402 struct btrfs_device *device;
3403 u64 dev_offset;
3404
3405 device = map->stripes[i].dev;
3406 dev_offset = map->stripes[i].physical;
3407
3408 ret = btrfs_alloc_dev_extent(trans, device,
3409 info->chunk_root->root_key.objectid,
3410 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3411 start, dev_offset, stripe_size);
3412 if (ret) {
3413 btrfs_abort_transaction(trans, extent_root, ret);
3414 goto error;
3415 }
3416 }
3417
3418 kfree(devices_info);
3419 return 0;
3420
3421 error:
3422 kfree(map);
3423 kfree(devices_info);
3424 return ret;
3425 }
3426
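/*
 * Second phase of chunk allocation: bump bytes_used on each stripe's
 * device, build the on-disk chunk item and insert it into the chunk
 * tree (and, for system chunks, into the superblock array).
 */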
3427 static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
3428 struct btrfs_root *extent_root,
3429 struct map_lookup *map, u64 chunk_offset,
3430 u64 chunk_size, u64 stripe_size)
3431 {
3432 u64 dev_offset;
3433 struct btrfs_key key;
3434 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3435 struct btrfs_device *device;
3436 struct btrfs_chunk *chunk;
3437 struct btrfs_stripe *stripe;
3438 size_t item_size = btrfs_chunk_item_size(map->num_stripes);
3439 int index = 0;
3440 int ret;
3441
3442 chunk = kzalloc(item_size, GFP_NOFS);
3443 if (!chunk)
3444 return -ENOMEM;
3445
3446 index = 0;
3447 while (index < map->num_stripes) {
3448 device = map->stripes[index].dev;
3449 device->bytes_used += stripe_size;
3450 ret = btrfs_update_device(trans, device);
3451 if (ret)
3452 goto out_free;
3453 index++;
3454 }
3455
3456 spin_lock(&extent_root->fs_info->free_chunk_lock);
3457 extent_root->fs_info->free_chunk_space -= (stripe_size *
3458 map->num_stripes);
3459 spin_unlock(&extent_root->fs_info->free_chunk_lock);
3460
3461 index = 0;
3462 stripe = &chunk->stripe;
3463 while (index < map->num_stripes) {
3464 device = map->stripes[index].dev;
3465 dev_offset = map->stripes[index].physical;
3466
3467 btrfs_set_stack_stripe_devid(stripe, device->devid);
3468 btrfs_set_stack_stripe_offset(stripe, dev_offset);
3469 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
3470 stripe++;
3471 index++;
3472 }
3473
3474 btrfs_set_stack_chunk_length(chunk, chunk_size);
3475 btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
3476 btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
3477 btrfs_set_stack_chunk_type(chunk, map->type);
3478 btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
3479 btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
3480 btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
3481 btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
3482 btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
3483
3484 key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3485 key.type = BTRFS_CHUNK_ITEM_KEY;
3486 key.offset = chunk_offset;
3487
3488 ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
3489
3490 if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3491 /*
3492 * TODO: Cleanup of inserted chunk root in case of
3493 * failure.
3494 */
3495 ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
3496 item_size);
3497 }
3498
3499 out_free:
3500 kfree(chunk);
3501 return ret;
3502 }
3503
3504 /*
3505 * Chunk allocation falls into two parts. The first part does the work
3506 * that makes the newly allocated chunk usable, but does not perform any
3507 * operation that modifies the chunk tree. The second part does the work
3508 * that requires modifying the chunk tree. This division is important for
3509 * the bootstrap process of adding storage to a seed btrfs.
3510 */
3511 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
3512 struct btrfs_root *extent_root, u64 type)
3513 {
3514 u64 chunk_offset;
3515 u64 chunk_size;
3516 u64 stripe_size;
3517 struct map_lookup *map;
3518 struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
3519 int ret;
3520
3521 ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
3522 &chunk_offset);
3523 if (ret)
3524 return ret;
3525
3526 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3527 &stripe_size, chunk_offset, type);
3528 if (ret)
3529 return ret;
3530
3531 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3532 chunk_size, stripe_size);
3533 if (ret)
3534 return ret;
3535 return 0;
3536 }
3537
3538 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
3539 struct btrfs_root *root,
3540 struct btrfs_device *device)
3541 {
3542 u64 chunk_offset;
3543 u64 sys_chunk_offset;
3544 u64 chunk_size;
3545 u64 sys_chunk_size;
3546 u64 stripe_size;
3547 u64 sys_stripe_size;
3548 u64 alloc_profile;
3549 struct map_lookup *map;
3550 struct map_lookup *sys_map;
3551 struct btrfs_fs_info *fs_info = root->fs_info;
3552 struct btrfs_root *extent_root = fs_info->extent_root;
3553 int ret;
3554
3555 ret = find_next_chunk(fs_info->chunk_root,
3556 BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
3557 if (ret)
3558 return ret;
3559
3560 alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
3561 fs_info->avail_metadata_alloc_bits;
3562 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3563
3564 ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
3565 &stripe_size, chunk_offset, alloc_profile);
3566 if (ret)
3567 return ret;
3568
3569 sys_chunk_offset = chunk_offset + chunk_size;
3570
3571 alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
3572 fs_info->avail_system_alloc_bits;
3573 alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
3574
3575 ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
3576 &sys_chunk_size, &sys_stripe_size,
3577 sys_chunk_offset, alloc_profile);
3578 if (ret)
3579 goto abort;
3580
3581 ret = btrfs_add_device(trans, fs_info->chunk_root, device);
3582 if (ret)
3583 goto abort;
3584
3585 /*
3586 * Modifying the chunk tree requires allocating new blocks from
3587 * both the system block group and the metadata block group, so we
3588 * can only perform operations that modify the chunk tree after
3589 * both block groups have been created.
3590 */
3591 ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
3592 chunk_size, stripe_size);
3593 if (ret)
3594 goto abort;
3595
3596 ret = __finish_chunk_alloc(trans, extent_root, sys_map,
3597 sys_chunk_offset, sys_chunk_size,
3598 sys_stripe_size);
3599 if (ret)
3600 goto abort;
3601
3602 return 0;
3603
3604 abort:
3605 btrfs_abort_transaction(trans, root, ret);
3606 return ret;
3607 }
3608
3609 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
3610 {
3611 struct extent_map *em;
3612 struct map_lookup *map;
3613 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
3614 int readonly = 0;
3615 int i;
3616
3617 read_lock(&map_tree->map_tree.lock);
3618 em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3619 read_unlock(&map_tree->map_tree.lock);
3620 if (!em)
3621 return 1;
3622
3623 if (btrfs_test_opt(root, DEGRADED)) {
3624 free_extent_map(em);
3625 return 0;
3626 }
3627
3628 map = (struct map_lookup *)em->bdev;
3629 for (i = 0; i < map->num_stripes; i++) {
3630 if (!map->stripes[i].dev->writeable) {
3631 readonly = 1;
3632 break;
3633 }
3634 }
3635 free_extent_map(em);
3636 return readonly;
3637 }
3638
3639 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
3640 {
3641 extent_map_tree_init(&tree->map_tree);
3642 }
3643
3644 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
3645 {
3646 struct extent_map *em;
3647
3648 while (1) {
3649 write_lock(&tree->map_tree.lock);
3650 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
3651 if (em)
3652 remove_extent_mapping(&tree->map_tree, em);
3653 write_unlock(&tree->map_tree.lock);
3654 if (!em)
3655 break;
3656 kfree(em->bdev);
3657 /* once for us */
3658 free_extent_map(em);
3659 /* once for the tree */
3660 free_extent_map(em);
3661 }
3662 }
3663
3664 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
3665 {
3666 struct extent_map *em;
3667 struct map_lookup *map;
3668 struct extent_map_tree *em_tree = &map_tree->map_tree;
3669 int ret;
3670
3671 read_lock(&em_tree->lock);
3672 em = lookup_extent_mapping(em_tree, logical, len);
3673 read_unlock(&em_tree->lock);
3674 BUG_ON(!em);
3675
3676 BUG_ON(em->start > logical || em->start + em->len < logical);
3677 map = (struct map_lookup *)em->bdev;
3678 if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
3679 ret = map->num_stripes;
3680 else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3681 ret = map->sub_stripes;
3682 else
3683 ret = 1;
3684 free_extent_map(em);
3685 return ret;
3686 }
3687
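/*
 * Pick a mirror whose device is still present, preferring @optimal and
 * otherwise scanning stripes [first, first + num).
 */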
3688 static int find_live_mirror(struct map_lookup *map, int first, int num,
3689 int optimal)
3690 {
3691 int i;
3692 if (map->stripes[optimal].dev->bdev)
3693 return optimal;
3694 for (i = first; i < first + num; i++) {
3695 if (map->stripes[i].dev->bdev)
3696 return i;
3697 }
3698 /* we couldn't find a mirror whose device is still present. Just return
3699 * something and the io error handling code will clean up eventually
3700 */
3701 return optimal;
3702 }
3703
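/*
 * Map a logical byte range to physical stripes: look up the chunk
 * mapping containing @logical, clamp *length (striped profiles are
 * limited to one stripe per bio), and, when @bbio_ret is given, fill
 * a btrfs_bio with one stripe for each copy the request type
 * (read, write or discard) has to touch.
 */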
3704 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3705 u64 logical, u64 *length,
3706 struct btrfs_bio **bbio_ret,
3707 int mirror_num)
3708 {
3709 struct extent_map *em;
3710 struct map_lookup *map;
3711 struct extent_map_tree *em_tree = &map_tree->map_tree;
3712 u64 offset;
3713 u64 stripe_offset;
3714 u64 stripe_end_offset;
3715 u64 stripe_nr;
3716 u64 stripe_nr_orig;
3717 u64 stripe_nr_end;
3718 int stripe_index;
3719 int i;
3720 int ret = 0;
3721 int num_stripes;
3722 int max_errors = 0;
3723 struct btrfs_bio *bbio = NULL;
3724
3725 read_lock(&em_tree->lock);
3726 em = lookup_extent_mapping(em_tree, logical, *length);
3727 read_unlock(&em_tree->lock);
3728
3729 if (!em) {
3730 printk(KERN_CRIT "unable to find logical %llu len %llu\n",
3731 (unsigned long long)logical,
3732 (unsigned long long)*length);
3733 BUG();
3734 }
3735
3736 BUG_ON(em->start > logical || em->start + em->len < logical);
3737 map = (struct map_lookup *)em->bdev;
3738 offset = logical - em->start;
3739
3740 if (mirror_num > map->num_stripes)
3741 mirror_num = 0;
3742
3743 stripe_nr = offset;
3744 /*
3745 * stripe_nr counts the total number of stripes we have to stride
3746 * to get to this block
3747 */
3748 do_div(stripe_nr, map->stripe_len);
3749
3750 stripe_offset = stripe_nr * map->stripe_len;
3751 BUG_ON(offset < stripe_offset);
3752
3753 /* stripe_offset is the offset of this block in its stripe */
3754 stripe_offset = offset - stripe_offset;
3755
3756 if (rw & REQ_DISCARD)
3757 *length = min_t(u64, em->len - offset, *length);
3758 else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
3759 /* we limit the length of each bio to what fits in a stripe */
3760 *length = min_t(u64, em->len - offset,
3761 map->stripe_len - stripe_offset);
3762 } else {
3763 *length = em->len - offset;
3764 }
3765
3766 if (!bbio_ret)
3767 goto out;
3768
3769 num_stripes = 1;
3770 stripe_index = 0;
3771 stripe_nr_orig = stripe_nr;
3772 stripe_nr_end = (offset + *length + map->stripe_len - 1) &
3773 (~(map->stripe_len - 1));
3774 do_div(stripe_nr_end, map->stripe_len);
3775 stripe_end_offset = stripe_nr_end * map->stripe_len -
3776 (offset + *length);
3777 if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3778 if (rw & REQ_DISCARD)
3779 num_stripes = min_t(u64, map->num_stripes,
3780 stripe_nr_end - stripe_nr_orig);
3781 stripe_index = do_div(stripe_nr, map->num_stripes);
3782 } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3783 if (rw & (REQ_WRITE | REQ_DISCARD))
3784 num_stripes = map->num_stripes;
3785 else if (mirror_num)
3786 stripe_index = mirror_num - 1;
3787 else {
3788 stripe_index = find_live_mirror(map, 0,
3789 map->num_stripes,
3790 current->pid % map->num_stripes);
3791 mirror_num = stripe_index + 1;
3792 }
3793
3794 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3795 if (rw & (REQ_WRITE | REQ_DISCARD)) {
3796 num_stripes = map->num_stripes;
3797 } else if (mirror_num) {
3798 stripe_index = mirror_num - 1;
3799 } else {
3800 mirror_num = 1;
3801 }
3802
3803 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3804 int factor = map->num_stripes / map->sub_stripes;
3805
3806 stripe_index = do_div(stripe_nr, factor);
3807 stripe_index *= map->sub_stripes;
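/*
 * Illustrative numbers: num_stripes = 4 and sub_stripes = 2 give
 * factor = 2 mirror pairs; the do_div remainder selects the pair and
 * stripe_index now points at its first copy.
 */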
3808
3809 if (rw & REQ_WRITE)
3810 num_stripes = map->sub_stripes;
3811 else if (rw & REQ_DISCARD)
3812 num_stripes = min_t(u64, map->sub_stripes *
3813 (stripe_nr_end - stripe_nr_orig),
3814 map->num_stripes);
3815 else if (mirror_num)
3816 stripe_index += mirror_num - 1;
3817 else {
3818 int old_stripe_index = stripe_index;
3819 stripe_index = find_live_mirror(map, stripe_index,
3820 map->sub_stripes, stripe_index +
3821 current->pid % map->sub_stripes);
3822 mirror_num = stripe_index - old_stripe_index + 1;
3823 }
3824 } else {
3825 /*
3826 * after this do_div call, stripe_nr is the number of stripes
3827 * on this device we have to walk to find the data, and
3828 * stripe_index is the number of our device in the stripe array
3829 */
3830 stripe_index = do_div(stripe_nr, map->num_stripes);
3831 mirror_num = stripe_index + 1;
3832 }
3833 BUG_ON(stripe_index >= map->num_stripes);
3834
3835 bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
3836 if (!bbio) {
3837 ret = -ENOMEM;
3838 goto out;
3839 }
3840 atomic_set(&bbio->error, 0);
3841
3842 if (rw & REQ_DISCARD) {
3843 int factor = 0;
3844 int sub_stripes = 0;
3845 u64 stripes_per_dev = 0;
3846 u32 remaining_stripes = 0;
3847 u32 last_stripe = 0;
3848
3849 if (map->type &
3850 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
3851 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3852 sub_stripes = 1;
3853 else
3854 sub_stripes = map->sub_stripes;
3855
3856 factor = map->num_stripes / sub_stripes;
3857 stripes_per_dev = div_u64_rem(stripe_nr_end -
3858 stripe_nr_orig,
3859 factor,
3860 &remaining_stripes);
3861 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
3862 last_stripe *= sub_stripes;
3863 }
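/*
 * stripes_per_dev is how many full stripe rows each device gets; the
 * first remaining_stripes stripe columns past stripe_index get one
 * extra row. The loop below also trims the head of the first row by
 * stripe_offset and the tail of the last by stripe_end_offset.
 */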
3864
3865 for (i = 0; i < num_stripes; i++) {
3866 bbio->stripes[i].physical =
3867 map->stripes[stripe_index].physical +
3868 stripe_offset + stripe_nr * map->stripe_len;
3869 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
3870
3871 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
3872 BTRFS_BLOCK_GROUP_RAID10)) {
3873 bbio->stripes[i].length = stripes_per_dev *
3874 map->stripe_len;
3875
3876 if (i / sub_stripes < remaining_stripes)
3877 bbio->stripes[i].length +=
3878 map->stripe_len;
3879
3880 /*
3881 * Special for the first stripe and
3882 * the last stripe:
3883 *
3884 * |-------|...|-------|
3885 * |----------|
3886 * off end_off
3887 */
3888 if (i < sub_stripes)
3889 bbio->stripes[i].length -=
3890 stripe_offset;
3891
3892 if (stripe_index >= last_stripe &&
3893 stripe_index <= (last_stripe +
3894 sub_stripes - 1))
3895 bbio->stripes[i].length -=
3896 stripe_end_offset;
3897
3898 if (i == sub_stripes - 1)
3899 stripe_offset = 0;
3900 } else
3901 bbio->stripes[i].length = *length;
3902
3903 stripe_index++;
3904 if (stripe_index == map->num_stripes) {
3905 /* This could only happen for RAID0/10 */
3906 stripe_index = 0;
3907 stripe_nr++;
3908 }
3909 }
3910 } else {
3911 for (i = 0; i < num_stripes; i++) {
3912 bbio->stripes[i].physical =
3913 map->stripes[stripe_index].physical +
3914 stripe_offset +
3915 stripe_nr * map->stripe_len;
3916 bbio->stripes[i].dev =
3917 map->stripes[stripe_index].dev;
3918 stripe_index++;
3919 }
3920 }
3921
3922 if (rw & REQ_WRITE) {
3923 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
3924 BTRFS_BLOCK_GROUP_RAID10 |
3925 BTRFS_BLOCK_GROUP_DUP)) {
3926 max_errors = 1;
3927 }
3928 }
3929
3930 *bbio_ret = bbio;
3931 bbio->num_stripes = num_stripes;
3932 bbio->max_errors = max_errors;
3933 bbio->mirror_num = mirror_num;
3934 out:
3935 free_extent_map(em);
3936 return ret;
3937 }
3938
3939 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3940 u64 logical, u64 *length,
3941 struct btrfs_bio **bbio_ret, int mirror_num)
3942 {
3943 return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
3944 mirror_num);
3945 }
3946
3947 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
3948 u64 chunk_start, u64 physical, u64 devid,
3949 u64 **logical, int *naddrs, int *stripe_len)
3950 {
3951 struct extent_map_tree *em_tree = &map_tree->map_tree;
3952 struct extent_map *em;
3953 struct map_lookup *map;
3954 u64 *buf;
3955 u64 bytenr;
3956 u64 length;
3957 u64 stripe_nr;
3958 int i, j, nr = 0;
3959
3960 read_lock(&em_tree->lock);
3961 em = lookup_extent_mapping(em_tree, chunk_start, 1);
3962 read_unlock(&em_tree->lock);
3963
3964 BUG_ON(!em || em->start != chunk_start);
3965 map = (struct map_lookup *)em->bdev;
3966
3967 length = em->len;
3968 if (map->type & BTRFS_BLOCK_GROUP_RAID10)
3969 do_div(length, map->num_stripes / map->sub_stripes);
3970 else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
3971 do_div(length, map->num_stripes);
3972
3973 buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
3974 BUG_ON(!buf); /* -ENOMEM */
3975
3976 for (i = 0; i < map->num_stripes; i++) {
3977 if (devid && map->stripes[i].dev->devid != devid)
3978 continue;
3979 if (map->stripes[i].physical > physical ||
3980 map->stripes[i].physical + length <= physical)
3981 continue;
3982
3983 stripe_nr = physical - map->stripes[i].physical;
3984 do_div(stripe_nr, map->stripe_len);
3985
3986 if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3987 stripe_nr = stripe_nr * map->num_stripes + i;
3988 do_div(stripe_nr, map->sub_stripes);
3989 } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3990 stripe_nr = stripe_nr * map->num_stripes + i;
3991 }
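/*
 * Invert the forward mapping: stripe_nr is how many stripe rows down
 * device i the physical address sits; RAID0 spreads rows as
 * stripe_nr * num_stripes + i, while RAID10 divides the copies back
 * out by sub_stripes. Single-device profiles fall through unchanged.
 */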
3992 bytenr = chunk_start + stripe_nr * map->stripe_len;
3993 WARN_ON(nr >= map->num_stripes);
3994 for (j = 0; j < nr; j++) {
3995 if (buf[j] == bytenr)
3996 break;
3997 }
3998 if (j == nr) {
3999 WARN_ON(nr >= map->num_stripes);
4000 buf[nr++] = bytenr;
4001 }
4002 }
4003
4004 *logical = buf;
4005 *naddrs = nr;
4006 *stripe_len = map->stripe_len;
4007
4008 free_extent_map(em);
4009 return 0;
4010 }
4011
4012 static void btrfs_end_bio(struct bio *bio, int err)
4013 {
4014 struct btrfs_bio *bbio = bio->bi_private;
4015 int is_orig_bio = 0;
4016
4017 if (err)
4018 atomic_inc(&bbio->error);
4019
4020 if (bio == bbio->orig_bio)
4021 is_orig_bio = 1;
4022
4023 if (atomic_dec_and_test(&bbio->stripes_pending)) {
4024 if (!is_orig_bio) {
4025 bio_put(bio);
4026 bio = bbio->orig_bio;
4027 }
4028 bio->bi_private = bbio->private;
4029 bio->bi_end_io = bbio->end_io;
4030 bio->bi_bdev = (struct block_device *)
4031 (unsigned long)bbio->mirror_num;
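/*
 * bi_bdev is no longer needed once the bio is done, so it is reused
 * to pass the mirror number back to the original end_io handler,
 * which may use it to kick off a retry on another copy.
 */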
4032 /* only send an error to the higher layers if it is
4033 * beyond the tolerance of the multi-bio
4034 */
4035 if (atomic_read(&bbio->error) > bbio->max_errors) {
4036 err = -EIO;
4037 } else {
4038 /*
4039 * this bio is actually up to date, we didn't
4040 * go over the max number of errors
4041 */
4042 set_bit(BIO_UPTODATE, &bio->bi_flags);
4043 err = 0;
4044 }
4045 kfree(bbio);
4046
4047 bio_endio(bio, err);
4048 } else if (!is_orig_bio) {
4049 bio_put(bio);
4050 }
4051 }
4052
4053 struct async_sched {
4054 struct bio *bio;
4055 int rw;
4056 struct btrfs_fs_info *info;
4057 struct btrfs_work work;
4058 };
4059
4060 /*
4061 * see run_scheduled_bios for a description of why bios are collected for
4062 * async submit.
4063 *
4064 * This will add one bio to the pending list for a device and make sure
4065 * the work struct is scheduled.
4066 */
4067 static noinline void schedule_bio(struct btrfs_root *root,
4068 struct btrfs_device *device,
4069 int rw, struct bio *bio)
4070 {
4071 int should_queue = 1;
4072 struct btrfs_pending_bios *pending_bios;
4073
4074 /* don't bother with additional async steps for reads, right now */
4075 if (!(rw & REQ_WRITE)) {
4076 bio_get(bio);
4077 btrfsic_submit_bio(rw, bio);
4078 bio_put(bio);
4079 return;
4080 }
4081
4082 /*
4083 * nr_async_bios allows us to reliably return congestion to the
4084 * higher layers. Otherwise, the async bio makes it appear we have
4085 * made progress against dirty pages when we've really just put it
4086 * on a queue for later
4087 */
4088 atomic_inc(&root->fs_info->nr_async_bios);
4089 WARN_ON(bio->bi_next);
4090 bio->bi_next = NULL;
4091 bio->bi_rw |= rw;
4092
4093 spin_lock(&device->io_lock);
4094 if (bio->bi_rw & REQ_SYNC)
4095 pending_bios = &device->pending_sync_bios;
4096 else
4097 pending_bios = &device->pending_bios;
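/*
 * Sync and regular bios sit on separate queues so that REQ_SYNC
 * writes (fsync and friends) can be submitted ahead of a large
 * backlog of background writeback queued on the same device.
 */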
4098
4099 if (pending_bios->tail)
4100 pending_bios->tail->bi_next = bio;
4101
4102 pending_bios->tail = bio;
4103 if (!pending_bios->head)
4104 pending_bios->head = bio;
4105 if (device->running_pending)
4106 should_queue = 0;
4107
4108 spin_unlock(&device->io_lock);
4109
4110 if (should_queue)
4111 btrfs_queue_worker(&root->fs_info->submit_workers,
4112 &device->work);
4113 }
4114
4115 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4116 int mirror_num, int async_submit)
4117 {
4118 struct btrfs_mapping_tree *map_tree;
4119 struct btrfs_device *dev;
4120 struct bio *first_bio = bio;
4121 u64 logical = (u64)bio->bi_sector << 9;
4122 u64 length = 0;
4123 u64 map_length;
4124 int ret;
4125 int dev_nr = 0;
4126 int total_devs = 1;
4127 struct btrfs_bio *bbio = NULL;
4128
4129 length = bio->bi_size;
4130 map_tree = &root->fs_info->mapping_tree;
4131 map_length = length;
4132
4133 ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
4134 mirror_num);
4135 if (ret) /* -ENOMEM */
4136 return ret;
4137
4138 total_devs = bbio->num_stripes;
4139 if (map_length < length) {
4140 printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
4141 "len %llu\n", (unsigned long long)logical,
4142 (unsigned long long)length,
4143 (unsigned long long)map_length);
4144 BUG();
4145 }
4146
4147 bbio->orig_bio = first_bio;
4148 bbio->private = first_bio->bi_private;
4149 bbio->end_io = first_bio->bi_end_io;
4150 atomic_set(&bbio->stripes_pending, bbio->num_stripes);
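/*
 * One bio goes out per stripe: clones for all but the last, which
 * reuses first_bio. stripes_pending drops in btrfs_end_bio() as each
 * completes, and only the final completion reports the overall
 * status to the original end_io.
 */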
4151
4152 while (dev_nr < total_devs) {
4153 if (dev_nr < total_devs - 1) {
4154 bio = bio_clone(first_bio, GFP_NOFS);
4155 BUG_ON(!bio); /* -ENOMEM */
4156 } else {
4157 bio = first_bio;
4158 }
4159 bio->bi_private = bbio;
4160 bio->bi_end_io = btrfs_end_bio;
4161 bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
4162 dev = bbio->stripes[dev_nr].dev;
4163 if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
4164 pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
4165 "(%s id %llu), size=%u\n", rw,
4166 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4167 dev->name, dev->devid, bio->bi_size);
4168 bio->bi_bdev = dev->bdev;
4169 if (async_submit)
4170 schedule_bio(root, dev, rw, bio);
4171 else
4172 btrfsic_submit_bio(rw, bio);
4173 } else {
4174 bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
4175 bio->bi_sector = logical >> 9;
4176 bio_endio(bio, -EIO);
4177 }
4178 dev_nr++;
4179 }
4180 return 0;
4181 }
4182
4183 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
4184 u8 *uuid, u8 *fsid)
4185 {
4186 struct btrfs_device *device;
4187 struct btrfs_fs_devices *cur_devices;
4188
4189 cur_devices = root->fs_info->fs_devices;
4190 while (cur_devices) {
4191 if (!fsid ||
4192 !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4193 device = __find_device(&cur_devices->devices,
4194 devid, uuid);
4195 if (device)
4196 return device;
4197 }
4198 cur_devices = cur_devices->seed;
4199 }
4200 return NULL;
4201 }
4202
4203 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
4204 u64 devid, u8 *dev_uuid)
4205 {
4206 struct btrfs_device *device;
4207 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4208
4209 device = kzalloc(sizeof(*device), GFP_NOFS);
4210 if (!device)
4211 return NULL;
4212 list_add(&device->dev_list,
4213 &fs_devices->devices);
4214 device->dev_root = root->fs_info->dev_root;
4215 device->devid = devid;
4216 device->work.func = pending_bios_fn;
4217 device->fs_devices = fs_devices;
4218 device->missing = 1;
4219 fs_devices->num_devices++;
4220 fs_devices->missing_devices++;
4221 spin_lock_init(&device->io_lock);
4222 INIT_LIST_HEAD(&device->dev_alloc_list);
4223 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
4224 return device;
4225 }
4226
4227 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
4228 struct extent_buffer *leaf,
4229 struct btrfs_chunk *chunk)
4230 {
4231 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4232 struct map_lookup *map;
4233 struct extent_map *em;
4234 u64 logical;
4235 u64 length;
4236 u64 devid;
4237 u8 uuid[BTRFS_UUID_SIZE];
4238 int num_stripes;
4239 int ret;
4240 int i;
4241
4242 logical = key->offset;
4243 length = btrfs_chunk_length(leaf, chunk);
4244
4245 read_lock(&map_tree->map_tree.lock);
4246 em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
4247 read_unlock(&map_tree->map_tree.lock);
4248
4249 /* already mapped? */
4250 if (em && em->start <= logical && em->start + em->len > logical) {
4251 free_extent_map(em);
4252 return 0;
4253 } else if (em) {
4254 free_extent_map(em);
4255 }
4256
4257 em = alloc_extent_map();
4258 if (!em)
4259 return -ENOMEM;
4260 num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
4261 map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
4262 if (!map) {
4263 free_extent_map(em);
4264 return -ENOMEM;
4265 }
4266
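/*
 * No real block device backs a chunk mapping; em->bdev is overloaded
 * to carry the map_lookup, matching the casts used by
 * __btrfs_map_block() and btrfs_rmap_block() above.
 */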
4267 em->bdev = (struct block_device *)map;
4268 em->start = logical;
4269 em->len = length;
4270 em->block_start = 0;
4271 em->block_len = em->len;
4272
4273 map->num_stripes = num_stripes;
4274 map->io_width = btrfs_chunk_io_width(leaf, chunk);
4275 map->io_align = btrfs_chunk_io_align(leaf, chunk);
4276 map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
4277 map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
4278 map->type = btrfs_chunk_type(leaf, chunk);
4279 map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
4280 for (i = 0; i < num_stripes; i++) {
4281 map->stripes[i].physical =
4282 btrfs_stripe_offset_nr(leaf, chunk, i);
4283 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
4284 read_extent_buffer(leaf, uuid, (unsigned long)
4285 btrfs_stripe_dev_uuid_nr(chunk, i),
4286 BTRFS_UUID_SIZE);
4287 map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
4288 NULL);
4289 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
4290 kfree(map);
4291 free_extent_map(em);
4292 return -EIO;
4293 }
4294 if (!map->stripes[i].dev) {
4295 map->stripes[i].dev =
4296 add_missing_dev(root, devid, uuid);
4297 if (!map->stripes[i].dev) {
4298 kfree(map);
4299 free_extent_map(em);
4300 return -EIO;
4301 }
4302 }
4303 map->stripes[i].dev->in_fs_metadata = 1;
4304 }
4305
4306 write_lock(&map_tree->map_tree.lock);
4307 ret = add_extent_mapping(&map_tree->map_tree, em);
4308 write_unlock(&map_tree->map_tree.lock);
4309 BUG_ON(ret); /* Tree corruption */
4310 free_extent_map(em);
4311
4312 return 0;
4313 }
4314
4315 static void fill_device_from_item(struct extent_buffer *leaf,
4316 struct btrfs_dev_item *dev_item,
4317 struct btrfs_device *device)
4318 {
4319 unsigned long ptr;
4320
4321 device->devid = btrfs_device_id(leaf, dev_item);
4322 device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
4323 device->total_bytes = device->disk_total_bytes;
4324 device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
4325 device->type = btrfs_device_type(leaf, dev_item);
4326 device->io_align = btrfs_device_io_align(leaf, dev_item);
4327 device->io_width = btrfs_device_io_width(leaf, dev_item);
4328 device->sector_size = btrfs_device_sector_size(leaf, dev_item);
4329
4330 ptr = (unsigned long)btrfs_device_uuid(dev_item);
4331 read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
4332 }
4333
4334 static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
4335 {
4336 struct btrfs_fs_devices *fs_devices;
4337 int ret;
4338
4339 BUG_ON(!mutex_is_locked(&uuid_mutex));
4340
4341 fs_devices = root->fs_info->fs_devices->seed;
4342 while (fs_devices) {
4343 if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
4344 ret = 0;
4345 goto out;
4346 }
4347 fs_devices = fs_devices->seed;
4348 }
4349
4350 fs_devices = find_fsid(fsid);
4351 if (!fs_devices) {
4352 ret = -ENOENT;
4353 goto out;
4354 }
4355
4356 fs_devices = clone_fs_devices(fs_devices);
4357 if (IS_ERR(fs_devices)) {
4358 ret = PTR_ERR(fs_devices);
4359 goto out;
4360 }
4361
4362 ret = __btrfs_open_devices(fs_devices, FMODE_READ,
4363 root->fs_info->bdev_holder);
4364 if (ret) {
4365 free_fs_devices(fs_devices);
4366 goto out;
4367 }
4368
4369 if (!fs_devices->seeding) {
4370 __btrfs_close_devices(fs_devices);
4371 free_fs_devices(fs_devices);
4372 ret = -EINVAL;
4373 goto out;
4374 }
4375
4376 fs_devices->seed = root->fs_info->fs_devices->seed;
4377 root->fs_info->fs_devices->seed = fs_devices;
4378 out:
4379 return ret;
4380 }
4381
4382 static int read_one_dev(struct btrfs_root *root,
4383 struct extent_buffer *leaf,
4384 struct btrfs_dev_item *dev_item)
4385 {
4386 struct btrfs_device *device;
4387 u64 devid;
4388 int ret;
4389 u8 fs_uuid[BTRFS_UUID_SIZE];
4390 u8 dev_uuid[BTRFS_UUID_SIZE];
4391
4392 devid = btrfs_device_id(leaf, dev_item);
4393 read_extent_buffer(leaf, dev_uuid,
4394 (unsigned long)btrfs_device_uuid(dev_item),
4395 BTRFS_UUID_SIZE);
4396 read_extent_buffer(leaf, fs_uuid,
4397 (unsigned long)btrfs_device_fsid(dev_item),
4398 BTRFS_UUID_SIZE);
4399
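/*
 * A dev item whose fsid differs from the mounted filesystem belongs
 * to a seed filesystem this one was sprouted from; open the seed's
 * device list first so the lookup below can find the device.
 */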
4400 if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
4401 ret = open_seed_devices(root, fs_uuid);
4402 if (ret && !btrfs_test_opt(root, DEGRADED))
4403 return ret;
4404 }
4405
4406 device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
4407 if (!device || !device->bdev) {
4408 if (!btrfs_test_opt(root, DEGRADED))
4409 return -EIO;
4410
4411 if (!device) {
4412 printk(KERN_WARNING "warning devid %llu missing\n",
4413 (unsigned long long)devid);
4414 device = add_missing_dev(root, devid, dev_uuid);
4415 if (!device)
4416 return -ENOMEM;
4417 } else if (!device->missing) {
4418 /*
4419 * this happens when a device that was properly set up
4420 * in the device info lists suddenly goes bad.
4421 * device->bdev is NULL, and so we have to set
4422 * device->missing to 1 here
4423 */
4424 root->fs_info->fs_devices->missing_devices++;
4425 device->missing = 1;
4426 }
4427 }
4428
4429 if (device->fs_devices != root->fs_info->fs_devices) {
4430 BUG_ON(device->writeable);
4431 if (device->generation !=
4432 btrfs_device_generation(leaf, dev_item))
4433 return -EINVAL;
4434 }
4435
4436 fill_device_from_item(leaf, dev_item, device);
4437 device->dev_root = root->fs_info->dev_root;
4438 device->in_fs_metadata = 1;
4439 if (device->writeable) {
4440 device->fs_devices->total_rw_bytes += device->total_bytes;
4441 spin_lock(&root->fs_info->free_chunk_lock);
4442 root->fs_info->free_chunk_space += device->total_bytes -
4443 device->bytes_used;
4444 spin_unlock(&root->fs_info->free_chunk_lock);
4445 }
4446 ret = 0;
4447 return ret;
4448 }
4449
4450 int btrfs_read_sys_array(struct btrfs_root *root)
4451 {
4452 struct btrfs_super_block *super_copy = root->fs_info->super_copy;
4453 struct extent_buffer *sb;
4454 struct btrfs_disk_key *disk_key;
4455 struct btrfs_chunk *chunk;
4456 u8 *ptr;
4457 unsigned long sb_ptr;
4458 int ret = 0;
4459 u32 num_stripes;
4460 u32 array_size;
4461 u32 len = 0;
4462 u32 cur;
4463 struct btrfs_key key;
4464
4465 sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
4466 BTRFS_SUPER_INFO_SIZE);
4467 if (!sb)
4468 return -ENOMEM;
4469 btrfs_set_buffer_uptodate(sb);
4470 btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
4471 /*
4472 * The sb extent buffer is artificial and just used to read the system array.
4473 * The btrfs_set_buffer_uptodate() call does not properly mark all of its
4474 * pages up-to-date when the page is larger: the extent does not cover the
4475 * whole page and consequently check_page_uptodate does not find all
4476 * the page's extents up-to-date (the hole beyond sb), so
4477 * write_extent_buffer then triggers a WARN_ON.
4478 *
4479 * Regular short extents go through the mark_extent_buffer_dirty/writeback
4480 * cycle, but sb spans only this function. Add an explicit SetPageUptodate
4481 * call to silence the warning, e.g. on PowerPC 64.
4482 */
4483 if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
4484 SetPageUptodate(sb->pages[0]);
4485
4486 write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
4487 array_size = btrfs_super_sys_array_size(super_copy);
4488
4489 ptr = super_copy->sys_chunk_array;
4490 sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
4491 cur = 0;
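/*
 * sys_chunk_array is a packed sequence of (btrfs_disk_key,
 * btrfs_chunk + inline stripes) pairs; walk it one key at a time,
 * sizing each chunk item by its stripe count.
 */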
4492
4493 while (cur < array_size) {
4494 disk_key = (struct btrfs_disk_key *)ptr;
4495 btrfs_disk_key_to_cpu(&key, disk_key);
4496
4497 len = sizeof(*disk_key); ptr += len;
4498 sb_ptr += len;
4499 cur += len;
4500
4501 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
4502 chunk = (struct btrfs_chunk *)sb_ptr;
4503 ret = read_one_chunk(root, &key, sb, chunk);
4504 if (ret)
4505 break;
4506 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
4507 len = btrfs_chunk_item_size(num_stripes);
4508 } else {
4509 ret = -EIO;
4510 break;
4511 }
4512 ptr += len;
4513 sb_ptr += len;
4514 cur += len;
4515 }
4516 free_extent_buffer(sb);
4517 return ret;
4518 }
4519
4520 int btrfs_read_chunk_tree(struct btrfs_root *root)
4521 {
4522 struct btrfs_path *path;
4523 struct extent_buffer *leaf;
4524 struct btrfs_key key;
4525 struct btrfs_key found_key;
4526 int ret;
4527 int slot;
4528
4529 root = root->fs_info->chunk_root;
4530
4531 path = btrfs_alloc_path();
4532 if (!path)
4533 return -ENOMEM;
4534
4535 mutex_lock(&uuid_mutex);
4536 lock_chunks(root);
4537
4538 /* first we search for all of the device items, and then we
4539 * read in all of the chunk items.  This way we can create chunk
4540 * mappings that reference all of the devices that are found
4541 */
4542 key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
4543 key.offset = 0;
4544 key.type = 0;
4545 again:
4546 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4547 if (ret < 0)
4548 goto error;
4549 while (1) {
4550 leaf = path->nodes[0];
4551 slot = path->slots[0];
4552 if (slot >= btrfs_header_nritems(leaf)) {
4553 ret = btrfs_next_leaf(root, path);
4554 if (ret == 0)
4555 continue;
4556 if (ret < 0)
4557 goto error;
4558 break;
4559 }
4560 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4561 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4562 if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
4563 break;
4564 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
4565 struct btrfs_dev_item *dev_item;
4566 dev_item = btrfs_item_ptr(leaf, slot,
4567 struct btrfs_dev_item);
4568 ret = read_one_dev(root, leaf, dev_item);
4569 if (ret)
4570 goto error;
4571 }
4572 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
4573 struct btrfs_chunk *chunk;
4574 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
4575 ret = read_one_chunk(root, &found_key, leaf, chunk);
4576 if (ret)
4577 goto error;
4578 }
4579 path->slots[0]++;
4580 }
4581 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
4582 key.objectid = 0;
4583 btrfs_release_path(path);
4584 goto again;
4585 }
4586 ret = 0;
4587 error:
4588 unlock_chunks(root);
4589 mutex_unlock(&uuid_mutex);
4590
4591 btrfs_free_path(path);
4592 return ret;
4593 }
4594