Lines matching refs: rbio
(Each entry gives the source line number, the matching code, and the enclosing function.)

66 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
67 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
70 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
71 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
72 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
73 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
74 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
76 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
80 static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func) in start_async_work() argument
82 INIT_WORK(&rbio->work, work_func); in start_async_work()
83 queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work); in start_async_work()
138 static void cache_rbio_pages(struct btrfs_raid_bio *rbio) in cache_rbio_pages() argument
143 ret = alloc_rbio_pages(rbio); in cache_rbio_pages()
147 for (i = 0; i < rbio->nr_sectors; i++) { in cache_rbio_pages()
149 if (!rbio->bio_sectors[i].page) in cache_rbio_pages()
152 ASSERT(rbio->stripe_sectors[i].page); in cache_rbio_pages()
153 memcpy_page(rbio->stripe_sectors[i].page, in cache_rbio_pages()
154 rbio->stripe_sectors[i].pgoff, in cache_rbio_pages()
155 rbio->bio_sectors[i].page, in cache_rbio_pages()
156 rbio->bio_sectors[i].pgoff, in cache_rbio_pages()
157 rbio->bioc->fs_info->sectorsize); in cache_rbio_pages()
158 rbio->stripe_sectors[i].uptodate = 1; in cache_rbio_pages()
160 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in cache_rbio_pages()
166 static int rbio_bucket(struct btrfs_raid_bio *rbio) in rbio_bucket() argument
168 u64 num = rbio->bioc->raid_map[0]; in rbio_bucket()
181 static bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio, in full_page_sectors_uptodate() argument
184 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in full_page_sectors_uptodate()
188 ASSERT(page_nr < rbio->nr_pages); in full_page_sectors_uptodate()
193 if (!rbio->stripe_sectors[i].uptodate) in full_page_sectors_uptodate()
204 static void index_stripe_sectors(struct btrfs_raid_bio *rbio) in index_stripe_sectors() argument
206 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in index_stripe_sectors()
210 for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) { in index_stripe_sectors()
213 ASSERT(page_index < rbio->nr_pages); in index_stripe_sectors()
214 rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index]; in index_stripe_sectors()
215 rbio->stripe_sectors[i].pgoff = offset_in_page(offset); in index_stripe_sectors()
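
The page/offset arithmetic in index_stripe_sectors() is easy to check in isolation: a linear byte offset into the stripe pages splits into a page index and an in-page offset. A minimal standalone sketch, assuming 4 KiB pages and a 4 KiB sectorsize (both illustrative values, not taken from the listing):

#include <stdio.h>

#define PAGE_SHIFT 12                  /* assumed: 4 KiB pages */
#define PAGE_SIZE  (1u << PAGE_SHIFT)

int main(void)
{
	const unsigned int sectorsize = 4096;  /* assumed sectorsize */
	const int nr_sectors = 8;

	for (int i = 0; i < nr_sectors; i++) {
		unsigned int offset = i * sectorsize;
		unsigned int page_index = offset >> PAGE_SHIFT;  /* which stripe page */
		unsigned int pgoff = offset & (PAGE_SIZE - 1);   /* like offset_in_page() */

		printf("sector %d -> page %u, pgoff %u\n", i, page_index, pgoff);
	}
	return 0;
}
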
285 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio) in __remove_rbio_from_cache() argument
287 int bucket = rbio_bucket(rbio); in __remove_rbio_from_cache()
295 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in __remove_rbio_from_cache()
298 table = rbio->bioc->fs_info->stripe_hash_table; in __remove_rbio_from_cache()
310 spin_lock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
312 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) { in __remove_rbio_from_cache()
313 list_del_init(&rbio->stripe_cache); in __remove_rbio_from_cache()
326 if (bio_list_empty(&rbio->bio_list)) { in __remove_rbio_from_cache()
327 if (!list_empty(&rbio->hash_list)) { in __remove_rbio_from_cache()
328 list_del_init(&rbio->hash_list); in __remove_rbio_from_cache()
329 refcount_dec(&rbio->refs); in __remove_rbio_from_cache()
330 BUG_ON(!list_empty(&rbio->plug_list)); in __remove_rbio_from_cache()
335 spin_unlock(&rbio->bio_list_lock); in __remove_rbio_from_cache()
339 __free_raid_bio(rbio); in __remove_rbio_from_cache()
345 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio) in remove_rbio_from_cache() argument
350 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags)) in remove_rbio_from_cache()
353 table = rbio->bioc->fs_info->stripe_hash_table; in remove_rbio_from_cache()
356 __remove_rbio_from_cache(rbio); in remove_rbio_from_cache()
367 struct btrfs_raid_bio *rbio; in btrfs_clear_rbio_cache() local
373 rbio = list_entry(table->stripe_cache.next, in btrfs_clear_rbio_cache()
376 __remove_rbio_from_cache(rbio); in btrfs_clear_rbio_cache()
405 static void cache_rbio(struct btrfs_raid_bio *rbio) in cache_rbio() argument
410 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags)) in cache_rbio()
413 table = rbio->bioc->fs_info->stripe_hash_table; in cache_rbio()
416 spin_lock(&rbio->bio_list_lock); in cache_rbio()
419 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags)) in cache_rbio()
420 refcount_inc(&rbio->refs); in cache_rbio()
422 if (!list_empty(&rbio->stripe_cache)) { in cache_rbio()
423 list_move(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
425 list_add(&rbio->stripe_cache, &table->stripe_cache); in cache_rbio()
429 spin_unlock(&rbio->bio_list_lock); in cache_rbio()
438 if (found != rbio) in cache_rbio()
469 static int rbio_is_full(struct btrfs_raid_bio *rbio) in rbio_is_full() argument
472 unsigned long size = rbio->bio_list_bytes; in rbio_is_full()
475 spin_lock_irqsave(&rbio->bio_list_lock, flags); in rbio_is_full()
476 if (size != rbio->nr_data * BTRFS_STRIPE_LEN) in rbio_is_full()
478 BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN); in rbio_is_full()
479 spin_unlock_irqrestore(&rbio->bio_list_lock, flags); in rbio_is_full()
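
The test in rbio_is_full() is plain arithmetic: a write is "full" only when the queued bio bytes cover every data stripe of the full stripe. A hedged sketch (BTRFS_STRIPE_LEN is 64 KiB in current kernels; treat the values as illustrative):

#include <stdbool.h>
#include <stdio.h>

#define STRIPE_LEN (64 * 1024)  /* assumed: BTRFS_STRIPE_LEN = 64 KiB */

static bool is_full(unsigned long bio_list_bytes, int nr_data)
{
	/* Full only when the bios cover nr_data whole stripes. */
	return bio_list_bytes == (unsigned long)nr_data * STRIPE_LEN;
}

int main(void)
{
	printf("%d\n", is_full(2 * STRIPE_LEN, 2)); /* 1: full write, 2 data stripes */
	printf("%d\n", is_full(STRIPE_LEN, 2));     /* 0: partial, needs RMW */
	return 0;
}
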
554 static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio, in rbio_stripe_sector_index() argument
558 ASSERT(stripe_nr < rbio->real_stripes); in rbio_stripe_sector_index()
559 ASSERT(sector_nr < rbio->stripe_nsectors); in rbio_stripe_sector_index()
561 return stripe_nr * rbio->stripe_nsectors + sector_nr; in rbio_stripe_sector_index()
565 static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio, in rbio_stripe_sector() argument
569 return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr, in rbio_stripe_sector()
574 static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio, in rbio_pstripe_sector() argument
577 return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr); in rbio_pstripe_sector()
581 static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio, in rbio_qstripe_sector() argument
584 if (rbio->nr_data + 1 == rbio->real_stripes) in rbio_qstripe_sector()
586 return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr); in rbio_qstripe_sector()
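
The helpers above all index one flat array: sector (stripe_nr, sector_nr) lives at stripe_nr * stripe_nsectors + sector_nr, with the data stripes first, then P at nr_data, then Q at nr_data + 1. A small sketch of that layout (stripe counts are illustrative):

#include <stdio.h>

int main(void)
{
	const int nr_data = 2, real_stripes = 4;   /* 2 data + P + Q (RAID6) */
	const int stripe_nsectors = 4;             /* sectors per stripe */

	for (int stripe = 0; stripe < real_stripes; stripe++) {
		const char *kind = stripe < nr_data ? "data" :
				   stripe == nr_data ? "P" : "Q";
		for (int s = 0; s < stripe_nsectors; s++)
			printf("stripe %d (%s), sector %d -> index %d\n",
			       stripe, kind, s, stripe * stripe_nsectors + s);
	}
	return 0;
}
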
611 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) in lock_stripe_add() argument
621 h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio); in lock_stripe_add()
625 if (cur->bioc->raid_map[0] != rbio->bioc->raid_map[0]) in lock_stripe_add()
638 steal_rbio(cur, rbio); in lock_stripe_add()
646 if (rbio_can_merge(cur, rbio)) { in lock_stripe_add()
647 merge_rbio(cur, rbio); in lock_stripe_add()
649 freeit = rbio; in lock_stripe_add()
661 if (rbio_can_merge(pending, rbio)) { in lock_stripe_add()
662 merge_rbio(pending, rbio); in lock_stripe_add()
664 freeit = rbio; in lock_stripe_add()
674 list_add_tail(&rbio->plug_list, &cur->plug_list); in lock_stripe_add()
680 refcount_inc(&rbio->refs); in lock_stripe_add()
681 list_add(&rbio->hash_list, &h->hash_list); in lock_stripe_add()
695 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio) in unlock_stripe() argument
702 bucket = rbio_bucket(rbio); in unlock_stripe()
703 h = rbio->bioc->fs_info->stripe_hash_table->table + bucket; in unlock_stripe()
705 if (list_empty(&rbio->plug_list)) in unlock_stripe()
706 cache_rbio(rbio); in unlock_stripe()
709 spin_lock(&rbio->bio_list_lock); in unlock_stripe()
711 if (!list_empty(&rbio->hash_list)) { in unlock_stripe()
717 if (list_empty(&rbio->plug_list) && in unlock_stripe()
718 test_bit(RBIO_CACHE_BIT, &rbio->flags)) { in unlock_stripe()
720 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in unlock_stripe()
721 BUG_ON(!bio_list_empty(&rbio->bio_list)); in unlock_stripe()
725 list_del_init(&rbio->hash_list); in unlock_stripe()
726 refcount_dec(&rbio->refs); in unlock_stripe()
733 if (!list_empty(&rbio->plug_list)) { in unlock_stripe()
735 struct list_head *head = rbio->plug_list.next; in unlock_stripe()
740 list_del_init(&rbio->plug_list); in unlock_stripe()
744 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
750 steal_rbio(rbio, next); in unlock_stripe()
753 steal_rbio(rbio, next); in unlock_stripe()
756 steal_rbio(rbio, next); in unlock_stripe()
764 spin_unlock(&rbio->bio_list_lock); in unlock_stripe()
769 remove_rbio_from_cache(rbio); in unlock_stripe()
772 static void __free_raid_bio(struct btrfs_raid_bio *rbio) in __free_raid_bio() argument
776 if (!refcount_dec_and_test(&rbio->refs)) in __free_raid_bio()
779 WARN_ON(!list_empty(&rbio->stripe_cache)); in __free_raid_bio()
780 WARN_ON(!list_empty(&rbio->hash_list)); in __free_raid_bio()
781 WARN_ON(!bio_list_empty(&rbio->bio_list)); in __free_raid_bio()
783 for (i = 0; i < rbio->nr_pages; i++) { in __free_raid_bio()
784 if (rbio->stripe_pages[i]) { in __free_raid_bio()
785 __free_page(rbio->stripe_pages[i]); in __free_raid_bio()
786 rbio->stripe_pages[i] = NULL; in __free_raid_bio()
790 btrfs_put_bioc(rbio->bioc); in __free_raid_bio()
791 kfree(rbio); in __free_raid_bio()
811 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err) in rbio_orig_end_io() argument
813 struct bio *cur = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
821 bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors); in rbio_orig_end_io()
831 unlock_stripe(rbio); in rbio_orig_end_io()
832 extra = bio_list_get(&rbio->bio_list); in rbio_orig_end_io()
833 __free_raid_bio(rbio); in rbio_orig_end_io()
846 struct btrfs_raid_bio *rbio = bio->bi_private; in raid_write_end_io() local
851 fail_bio_stripe(rbio, bio); in raid_write_end_io()
855 if (!atomic_dec_and_test(&rbio->stripes_pending)) in raid_write_end_io()
861 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ? in raid_write_end_io()
862 0 : rbio->bioc->max_errors; in raid_write_end_io()
863 if (atomic_read(&rbio->error) > max_errors) in raid_write_end_io()
866 rbio_orig_end_io(rbio, err); in raid_write_end_io()
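
raid_write_end_io() is the classic "last one out" completion pattern: every per-device bio decrements stripes_pending, and only the final decrement compares the accumulated error count against max_errors. A userspace sketch of the same pattern (names are illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int stripes_pending;
static atomic_int errors;

/* Called once per finished per-device bio. */
static void write_end_io(int failed, int max_errors)
{
	if (failed)
		atomic_fetch_add(&errors, 1);

	/* Only the last completion finishes the whole rbio. */
	if (atomic_fetch_sub(&stripes_pending, 1) != 1)
		return;

	printf(atomic_load(&errors) > max_errors ? "IO error\n" : "OK\n");
}

int main(void)
{
	atomic_store(&stripes_pending, 3);
	write_end_io(0, 1);
	write_end_io(1, 1);  /* one failure, still within max_errors */
	write_end_io(0, 1);  /* last one: reports OK */
	return 0;
}
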
881 static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio, in sector_in_rbio() argument
888 ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes); in sector_in_rbio()
889 ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors); in sector_in_rbio()
891 index = stripe_nr * rbio->stripe_nsectors + sector_nr; in sector_in_rbio()
892 ASSERT(index >= 0 && index < rbio->nr_sectors); in sector_in_rbio()
894 spin_lock_irq(&rbio->bio_list_lock); in sector_in_rbio()
895 sector = &rbio->bio_sectors[index]; in sector_in_rbio()
900 spin_unlock_irq(&rbio->bio_list_lock); in sector_in_rbio()
903 spin_unlock_irq(&rbio->bio_list_lock); in sector_in_rbio()
905 return &rbio->stripe_sectors[index]; in sector_in_rbio()
921 struct btrfs_raid_bio *rbio; in alloc_rbio() local
932 rbio = kzalloc(sizeof(*rbio) + in alloc_rbio()
933 sizeof(*rbio->stripe_pages) * num_pages + in alloc_rbio()
934 sizeof(*rbio->bio_sectors) * num_sectors + in alloc_rbio()
935 sizeof(*rbio->stripe_sectors) * num_sectors + in alloc_rbio()
936 sizeof(*rbio->finish_pointers) * real_stripes, in alloc_rbio()
938 if (!rbio) in alloc_rbio()
941 bio_list_init(&rbio->bio_list); in alloc_rbio()
942 INIT_LIST_HEAD(&rbio->plug_list); in alloc_rbio()
943 spin_lock_init(&rbio->bio_list_lock); in alloc_rbio()
944 INIT_LIST_HEAD(&rbio->stripe_cache); in alloc_rbio()
945 INIT_LIST_HEAD(&rbio->hash_list); in alloc_rbio()
947 rbio->bioc = bioc; in alloc_rbio()
948 rbio->nr_pages = num_pages; in alloc_rbio()
949 rbio->nr_sectors = num_sectors; in alloc_rbio()
950 rbio->real_stripes = real_stripes; in alloc_rbio()
951 rbio->stripe_npages = stripe_npages; in alloc_rbio()
952 rbio->stripe_nsectors = stripe_nsectors; in alloc_rbio()
953 rbio->faila = -1; in alloc_rbio()
954 rbio->failb = -1; in alloc_rbio()
955 refcount_set(&rbio->refs, 1); in alloc_rbio()
956 atomic_set(&rbio->error, 0); in alloc_rbio()
957 atomic_set(&rbio->stripes_pending, 0); in alloc_rbio()
963 p = rbio + 1; in alloc_rbio()
968 CONSUME_ALLOC(rbio->stripe_pages, num_pages); in alloc_rbio()
969 CONSUME_ALLOC(rbio->bio_sectors, num_sectors); in alloc_rbio()
970 CONSUME_ALLOC(rbio->stripe_sectors, num_sectors); in alloc_rbio()
971 CONSUME_ALLOC(rbio->finish_pointers, real_stripes); in alloc_rbio()
975 rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type); in alloc_rbio()
977 return rbio; in alloc_rbio()
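
alloc_rbio() makes a single allocation for the struct plus all of its variable-length arrays, then carves pointers out of the tail; that is what the CONSUME_ALLOC lines do. A minimal standalone version of the pattern, with made-up field names and sizes:

#include <stdio.h>
#include <stdlib.h>

struct demo {
	int nr_pages;
	void **pages;   /* points into the same allocation */
	int *sectors;   /* ditto */
};

int main(void)
{
	int num_pages = 4, num_sectors = 16;
	struct demo *d = calloc(1, sizeof(*d) +
				   sizeof(*d->pages) * num_pages +
				   sizeof(*d->sectors) * num_sectors);
	if (!d)
		return 1;

	char *p = (char *)(d + 1);              /* first byte past the struct */
	d->pages = (void **)p;
	p += sizeof(*d->pages) * num_pages;
	d->sectors = (int *)p;                  /* p += ... for further arrays */

	printf("one allocation, one free\n");
	free(d);                                /* frees struct and arrays together */
	return 0;
}
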
981 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_pages() argument
985 ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages); in alloc_rbio_pages()
989 index_stripe_sectors(rbio); in alloc_rbio_pages()
994 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_parity_pages() argument
996 const int data_pages = rbio->nr_data * rbio->stripe_npages; in alloc_rbio_parity_pages()
999 ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages, in alloc_rbio_parity_pages()
1000 rbio->stripe_pages + data_pages); in alloc_rbio_parity_pages()
1004 index_stripe_sectors(rbio); in alloc_rbio_parity_pages()
1014 static int rbio_add_io_sector(struct btrfs_raid_bio *rbio, in rbio_add_io_sector() argument
1021 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in rbio_add_io_sector()
1033 ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes); in rbio_add_io_sector()
1034 ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors); in rbio_add_io_sector()
1037 stripe = &rbio->bioc->stripes[stripe_nr]; in rbio_add_io_sector()
1042 return fail_rbio_index(rbio, stripe_nr); in rbio_add_io_sector()
1067 bio->bi_private = rbio; in rbio_add_io_sector()
1081 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio) in validate_rbio_for_rmw() argument
1083 if (rbio->faila >= 0 || rbio->failb >= 0) { in validate_rbio_for_rmw()
1084 BUG_ON(rbio->faila == rbio->real_stripes - 1); in validate_rbio_for_rmw()
1085 __raid56_parity_recover(rbio); in validate_rbio_for_rmw()
1087 finish_rmw(rbio); in validate_rbio_for_rmw()
1091 static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio) in index_one_bio() argument
1093 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in index_one_bio()
1097 rbio->bioc->raid_map[0]; in index_one_bio()
1105 struct sector_ptr *sector = &rbio->bio_sectors[index]; in index_one_bio()
1122 static void index_rbio_pages(struct btrfs_raid_bio *rbio) in index_rbio_pages() argument
1126 spin_lock_irq(&rbio->bio_list_lock); in index_rbio_pages()
1127 bio_list_for_each(bio, &rbio->bio_list) in index_rbio_pages()
1128 index_one_bio(rbio, bio); in index_rbio_pages()
1130 spin_unlock_irq(&rbio->bio_list_lock); in index_rbio_pages()
1133 static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio, in bio_get_trace_info() argument
1136 const struct btrfs_io_context *bioc = rbio->bioc; in bio_get_trace_info()
1169 static noinline void finish_rmw(struct btrfs_raid_bio *rbio) in finish_rmw() argument
1171 struct btrfs_io_context *bioc = rbio->bioc; in finish_rmw()
1173 void **pointers = rbio->finish_pointers; in finish_rmw()
1174 int nr_data = rbio->nr_data; in finish_rmw()
1187 if (rbio->real_stripes - rbio->nr_data == 1) in finish_rmw()
1189 else if (rbio->real_stripes - rbio->nr_data == 2) in finish_rmw()
1195 ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors)); in finish_rmw()
1205 spin_lock_irq(&rbio->bio_list_lock); in finish_rmw()
1206 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in finish_rmw()
1207 spin_unlock_irq(&rbio->bio_list_lock); in finish_rmw()
1209 atomic_set(&rbio->error, 0); in finish_rmw()
1220 index_rbio_pages(rbio); in finish_rmw()
1221 if (!rbio_is_full(rbio)) in finish_rmw()
1222 cache_rbio_pages(rbio); in finish_rmw()
1224 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_rmw()
1226 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { in finish_rmw()
1231 sector = sector_in_rbio(rbio, stripe, sectornr, 0); in finish_rmw()
1237 sector = rbio_pstripe_sector(rbio, sectornr); in finish_rmw()
1246 sector = rbio_qstripe_sector(rbio, sectornr); in finish_rmw()
1251 raid6_call.gen_syndrome(rbio->real_stripes, sectorsize, in finish_rmw()
1266 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in finish_rmw()
1270 stripe = total_sector_nr / rbio->stripe_nsectors; in finish_rmw()
1271 sectornr = total_sector_nr % rbio->stripe_nsectors; in finish_rmw()
1274 if (!test_bit(sectornr, &rbio->dbitmap)) in finish_rmw()
1277 if (stripe < rbio->nr_data) { in finish_rmw()
1278 sector = sector_in_rbio(rbio, stripe, sectornr, 1); in finish_rmw()
1282 sector = rbio_stripe_sector(rbio, stripe, sectornr); in finish_rmw()
1285 ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe, in finish_rmw()
1294 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in finish_rmw()
1298 stripe = total_sector_nr / rbio->stripe_nsectors; in finish_rmw()
1299 sectornr = total_sector_nr % rbio->stripe_nsectors; in finish_rmw()
1307 total_sector_nr += rbio->stripe_nsectors - 1; in finish_rmw()
1312 if (!test_bit(sectornr, &rbio->dbitmap)) in finish_rmw()
1315 if (stripe < rbio->nr_data) { in finish_rmw()
1316 sector = sector_in_rbio(rbio, stripe, sectornr, 1); in finish_rmw()
1320 sector = rbio_stripe_sector(rbio, stripe, sectornr); in finish_rmw()
1323 ret = rbio_add_io_sector(rbio, &bio_list, sector, in finish_rmw()
1324 rbio->bioc->tgtdev_map[stripe], in finish_rmw()
1331 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list)); in finish_rmw()
1332 BUG_ON(atomic_read(&rbio->stripes_pending) == 0); in finish_rmw()
1340 bio_get_trace_info(rbio, bio, &trace_info); in finish_rmw()
1341 trace_raid56_write_stripe(rbio, bio, &trace_info); in finish_rmw()
1348 rbio_orig_end_io(rbio, BLK_STS_IOERR); in finish_rmw()
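
finish_rmw(), like most loops in this file, walks every sector of every stripe with one counter and splits it with div/mod; skipping the rest of a stripe is done by bumping the counter by stripe_nsectors - 1 and letting the loop's own increment finish the jump. Sketch of the iteration pattern (the skipped stripe is illustrative):

#include <stdio.h>

int main(void)
{
	const int nr_stripes = 3, stripe_nsectors = 4;
	const int nr_sectors = nr_stripes * stripe_nsectors;
	const int skip_stripe = 1;  /* pretend this device is missing */

	for (int total = 0; total < nr_sectors; total++) {
		int stripe = total / stripe_nsectors;
		int sectornr = total % stripe_nsectors;

		if (stripe == skip_stripe && sectornr == 0) {
			/* skip the rest of this stripe; the ++ adds the last step */
			total += stripe_nsectors - 1;
			continue;
		}
		printf("stripe %d sector %d\n", stripe, sectornr);
	}
	return 0;
}
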
1359 static int find_bio_stripe(struct btrfs_raid_bio *rbio, in find_bio_stripe() argument
1368 for (i = 0; i < rbio->bioc->num_stripes; i++) { in find_bio_stripe()
1369 stripe = &rbio->bioc->stripes[i]; in find_bio_stripe()
1383 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, in find_logical_bio_stripe() argument
1389 for (i = 0; i < rbio->nr_data; i++) { in find_logical_bio_stripe()
1390 u64 stripe_start = rbio->bioc->raid_map[i]; in find_logical_bio_stripe()
1401 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed) in fail_rbio_index() argument
1406 spin_lock_irqsave(&rbio->bio_list_lock, flags); in fail_rbio_index()
1409 if (rbio->faila == failed || rbio->failb == failed) in fail_rbio_index()
1412 if (rbio->faila == -1) { in fail_rbio_index()
1414 rbio->faila = failed; in fail_rbio_index()
1415 atomic_inc(&rbio->error); in fail_rbio_index()
1416 } else if (rbio->failb == -1) { in fail_rbio_index()
1418 rbio->failb = failed; in fail_rbio_index()
1419 atomic_inc(&rbio->error); in fail_rbio_index()
1424 spin_unlock_irqrestore(&rbio->bio_list_lock, flags); in fail_rbio_index()
1433 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, in fail_bio_stripe() argument
1436 int failed = find_bio_stripe(rbio, bio); in fail_bio_stripe()
1441 return fail_rbio_index(rbio, failed); in fail_bio_stripe()
1448 static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio, in find_stripe_sector() argument
1454 for (i = 0; i < rbio->nr_sectors; i++) { in find_stripe_sector()
1455 struct sector_ptr *sector = &rbio->stripe_sectors[i]; in find_stripe_sector()
1467 static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio) in set_bio_pages_uptodate() argument
1469 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in set_bio_pages_uptodate()
1481 sector = find_stripe_sector(rbio, bvec->bv_page, pgoff); in set_bio_pages_uptodate()
1491 struct btrfs_raid_bio *rbio = bio->bi_private; in raid56_bio_end_io() local
1494 fail_bio_stripe(rbio, bio); in raid56_bio_end_io()
1496 set_bio_pages_uptodate(rbio, bio); in raid56_bio_end_io()
1500 if (atomic_dec_and_test(&rbio->stripes_pending)) in raid56_bio_end_io()
1501 queue_work(rbio->bioc->fs_info->endio_raid56_workers, in raid56_bio_end_io()
1502 &rbio->end_io_work); in raid56_bio_end_io()
1515 struct btrfs_raid_bio *rbio = in raid56_rmw_end_io_work() local
1518 if (atomic_read(&rbio->error) > rbio->bioc->max_errors) { in raid56_rmw_end_io_work()
1519 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid56_rmw_end_io_work()
1527 validate_rbio_for_rmw(rbio); in raid56_rmw_end_io_work()
1534 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) in raid56_rmw_stripe() argument
1538 const int nr_data_sectors = rbio->stripe_nsectors * rbio->nr_data; in raid56_rmw_stripe()
1545 ret = alloc_rbio_pages(rbio); in raid56_rmw_stripe()
1549 index_rbio_pages(rbio); in raid56_rmw_stripe()
1551 atomic_set(&rbio->error, 0); in raid56_rmw_stripe()
1556 int stripe = total_sector_nr / rbio->stripe_nsectors; in raid56_rmw_stripe()
1557 int sectornr = total_sector_nr % rbio->stripe_nsectors; in raid56_rmw_stripe()
1564 sector = sector_in_rbio(rbio, stripe, sectornr, 1); in raid56_rmw_stripe()
1568 sector = rbio_stripe_sector(rbio, stripe, sectornr); in raid56_rmw_stripe()
1576 ret = rbio_add_io_sector(rbio, &bio_list, sector, in raid56_rmw_stripe()
1597 atomic_set(&rbio->stripes_pending, bios_to_read); in raid56_rmw_stripe()
1598 INIT_WORK(&rbio->end_io_work, raid56_rmw_end_io_work); in raid56_rmw_stripe()
1605 bio_get_trace_info(rbio, bio, &trace_info); in raid56_rmw_stripe()
1606 trace_raid56_read_partial(rbio, bio, &trace_info); in raid56_rmw_stripe()
1614 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid56_rmw_stripe()
1622 validate_rbio_for_rmw(rbio); in raid56_rmw_stripe()
1630 static int full_stripe_write(struct btrfs_raid_bio *rbio) in full_stripe_write() argument
1634 ret = alloc_rbio_parity_pages(rbio); in full_stripe_write()
1638 ret = lock_stripe_add(rbio); in full_stripe_write()
1640 finish_rmw(rbio); in full_stripe_write()
1649 static int partial_stripe_write(struct btrfs_raid_bio *rbio) in partial_stripe_write() argument
1653 ret = lock_stripe_add(rbio); in partial_stripe_write()
1655 start_async_work(rbio, rmw_work); in partial_stripe_write()
1665 static int __raid56_parity_write(struct btrfs_raid_bio *rbio) in __raid56_parity_write() argument
1668 if (!rbio_is_full(rbio)) in __raid56_parity_write()
1669 return partial_stripe_write(rbio); in __raid56_parity_write()
1670 return full_stripe_write(rbio); in __raid56_parity_write()
1773 static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio) in rbio_add_bio() argument
1775 const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info; in rbio_add_bio()
1777 const u64 full_stripe_start = rbio->bioc->raid_map[0]; in rbio_add_bio()
1784 rbio->nr_data * BTRFS_STRIPE_LEN); in rbio_add_bio()
1786 bio_list_add(&rbio->bio_list, orig_bio); in rbio_add_bio()
1787 rbio->bio_list_bytes += orig_bio->bi_iter.bi_size; in rbio_add_bio()
1793 fs_info->sectorsize_bits) % rbio->stripe_nsectors; in rbio_add_bio()
1795 set_bit(bit, &rbio->dbitmap); in rbio_add_bio()
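
rbio_add_bio() records which data sectors of the full stripe the bio touches: each covered offset is shifted down by sectorsize_bits and reduced mod stripe_nsectors to a bit in dbitmap, so the same sector number in every data stripe maps to the same bit. Worked arithmetic, assuming 4 KiB sectors and 64 KiB stripes (so stripe_nsectors = 16; values are illustrative):

#include <stdio.h>

int main(void)
{
	const unsigned int sectorsize_bits = 12;  /* 4 KiB sectors */
	const unsigned int stripe_nsectors = 16;  /* 64 KiB / 4 KiB */
	unsigned long dbitmap = 0;

	/* A bio covering [128 KiB, 136 KiB) of the full stripe. */
	for (unsigned long off = 128 * 1024; off < 136 * 1024; off += 4096) {
		unsigned int bit = (off >> sectorsize_bits) % stripe_nsectors;
		dbitmap |= 1UL << bit;
	}
	printf("dbitmap = 0x%lx\n", dbitmap);     /* bits 0 and 1 set: 0x3 */
	return 0;
}
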
1805 struct btrfs_raid_bio *rbio; in raid56_parity_write() local
1810 rbio = alloc_rbio(fs_info, bioc); in raid56_parity_write()
1811 if (IS_ERR(rbio)) { in raid56_parity_write()
1812 ret = PTR_ERR(rbio); in raid56_parity_write()
1815 rbio->operation = BTRFS_RBIO_WRITE; in raid56_parity_write()
1816 rbio_add_bio(rbio, bio); in raid56_parity_write()
1822 if (rbio_is_full(rbio)) { in raid56_parity_write()
1823 ret = full_stripe_write(rbio); in raid56_parity_write()
1825 __free_raid_bio(rbio); in raid56_parity_write()
1838 list_add_tail(&rbio->plug_list, &plug->rbio_list); in raid56_parity_write()
1840 ret = __raid56_parity_write(rbio); in raid56_parity_write()
1842 __free_raid_bio(rbio); in raid56_parity_write()
1859 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) in __raid_recover_end_io() argument
1861 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in __raid_recover_end_io()
1873 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in __raid_recover_end_io()
1883 unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS); in __raid_recover_end_io()
1889 faila = rbio->faila; in __raid_recover_end_io()
1890 failb = rbio->failb; in __raid_recover_end_io()
1892 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1893 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { in __raid_recover_end_io()
1894 spin_lock_irq(&rbio->bio_list_lock); in __raid_recover_end_io()
1895 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); in __raid_recover_end_io()
1896 spin_unlock_irq(&rbio->bio_list_lock); in __raid_recover_end_io()
1899 index_rbio_pages(rbio); in __raid_recover_end_io()
1901 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) { in __raid_recover_end_io()
1908 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB && in __raid_recover_end_io()
1909 !test_bit(sectornr, &rbio->dbitmap)) in __raid_recover_end_io()
1918 for (stripe = 0; stripe < rbio->real_stripes; stripe++) { in __raid_recover_end_io()
1923 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
1924 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) && in __raid_recover_end_io()
1926 sector = sector_in_rbio(rbio, stripe, sectornr, 0); in __raid_recover_end_io()
1928 sector = rbio_stripe_sector(rbio, stripe, sectornr); in __raid_recover_end_io()
1937 if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) { in __raid_recover_end_io()
1940 if (faila == rbio->nr_data) { in __raid_recover_end_io()
1966 if (rbio->bioc->raid_map[failb] == RAID6_Q_STRIPE) { in __raid_recover_end_io()
1967 if (rbio->bioc->raid_map[faila] == in __raid_recover_end_io()
1979 if (rbio->bioc->raid_map[failb] == RAID5_P_STRIPE) { in __raid_recover_end_io()
1980 raid6_datap_recov(rbio->real_stripes, in __raid_recover_end_io()
1983 raid6_2data_recov(rbio->real_stripes, in __raid_recover_end_io()
1994 memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize); in __raid_recover_end_io()
1998 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++) in __raid_recover_end_io()
2000 pointers[rbio->nr_data - 1] = p; in __raid_recover_end_io()
2003 run_xor(pointers, rbio->nr_data - 1, sectorsize); in __raid_recover_end_io()
2011 if (rbio->operation == BTRFS_RBIO_WRITE) { in __raid_recover_end_io()
2012 for (i = 0; i < rbio->stripe_nsectors; i++) { in __raid_recover_end_io()
2014 sector = rbio_stripe_sector(rbio, faila, i); in __raid_recover_end_io()
2018 sector = rbio_stripe_sector(rbio, failb, i); in __raid_recover_end_io()
2023 for (stripe = rbio->real_stripes - 1; stripe >= 0; stripe--) in __raid_recover_end_io()
2039 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid_recover_end_io()
2040 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { in __raid_recover_end_io()
2056 if (err == BLK_STS_OK && rbio->failb < 0) in __raid_recover_end_io()
2057 cache_rbio_pages(rbio); in __raid_recover_end_io()
2059 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in __raid_recover_end_io()
2061 rbio_orig_end_io(rbio, err); in __raid_recover_end_io()
2063 rbio->faila = -1; in __raid_recover_end_io()
2064 rbio->failb = -1; in __raid_recover_end_io()
2066 if (rbio->operation == BTRFS_RBIO_WRITE) in __raid_recover_end_io()
2067 finish_rmw(rbio); in __raid_recover_end_io()
2068 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) in __raid_recover_end_io()
2069 finish_parity_scrub(rbio, 0); in __raid_recover_end_io()
2073 rbio_orig_end_io(rbio, err); in __raid_recover_end_io()
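
For the RAID5 branch of __raid_recover_end_io() (single failure, no Q stripe), recovery is pure XOR: parity is the XOR of all data sectors, so the missing sector is the XOR of the survivors plus parity. This is what the pointers[] shuffle and run_xor() call compute. A minimal sketch with a tiny sector size:

#include <stdio.h>
#include <string.h>

#define SECTORSIZE 16  /* tiny sector for the demo */

static void xor_into(unsigned char *dst, const unsigned char *src)
{
	for (int i = 0; i < SECTORSIZE; i++)
		dst[i] ^= src[i];
}

int main(void)
{
	unsigned char d0[SECTORSIZE] = "hello raid5";
	unsigned char d1[SECTORSIZE] = "btrfs rbio";
	unsigned char p[SECTORSIZE] = {0}, rebuilt[SECTORSIZE] = {0};

	xor_into(p, d0);                /* P = d0 ^ d1 */
	xor_into(p, d1);

	/* Pretend d0 is lost: d0 = d1 ^ P. */
	xor_into(rebuilt, d1);
	xor_into(rebuilt, p);
	printf("rebuilt: %s (%s)\n", rebuilt,
	       memcmp(rebuilt, d0, SECTORSIZE) ? "FAIL" : "ok");
	return 0;
}
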
2083 struct btrfs_raid_bio *rbio = in raid_recover_end_io_work() local
2086 if (atomic_read(&rbio->error) > rbio->bioc->max_errors) in raid_recover_end_io_work()
2087 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid_recover_end_io_work()
2089 __raid_recover_end_io(rbio); in raid_recover_end_io_work()
2100 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) in __raid56_parity_recover() argument
2110 ret = alloc_rbio_pages(rbio); in __raid56_parity_recover()
2114 atomic_set(&rbio->error, 0); in __raid56_parity_recover()
2124 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in __raid56_parity_recover()
2126 int stripe = total_sector_nr / rbio->stripe_nsectors; in __raid56_parity_recover()
2127 int sectornr = total_sector_nr % rbio->stripe_nsectors; in __raid56_parity_recover()
2130 if (rbio->faila == stripe || rbio->failb == stripe) { in __raid56_parity_recover()
2131 atomic_inc(&rbio->error); in __raid56_parity_recover()
2134 total_sector_nr += rbio->stripe_nsectors - 1; in __raid56_parity_recover()
2137 sector = rbio_stripe_sector(rbio, stripe, sectornr); in __raid56_parity_recover()
2138 ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe, in __raid56_parity_recover()
2151 if (atomic_read(&rbio->error) <= rbio->bioc->max_errors) { in __raid56_parity_recover()
2152 __raid_recover_end_io(rbio); in __raid56_parity_recover()
2163 atomic_set(&rbio->stripes_pending, bios_to_read); in __raid56_parity_recover()
2164 INIT_WORK(&rbio->end_io_work, raid_recover_end_io_work); in __raid56_parity_recover()
2171 bio_get_trace_info(rbio, bio, &trace_info); in __raid56_parity_recover()
2172 trace_raid56_scrub_read_recover(rbio, bio, &trace_info); in __raid56_parity_recover()
2180 if (rbio->operation == BTRFS_RBIO_READ_REBUILD || in __raid56_parity_recover()
2181 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) in __raid56_parity_recover()
2182 rbio_orig_end_io(rbio, BLK_STS_IOERR); in __raid56_parity_recover()
2200 struct btrfs_raid_bio *rbio; in raid56_parity_recover() local
2202 rbio = alloc_rbio(fs_info, bioc); in raid56_parity_recover()
2203 if (IS_ERR(rbio)) { in raid56_parity_recover()
2204 bio->bi_status = errno_to_blk_status(PTR_ERR(rbio)); in raid56_parity_recover()
2208 rbio->operation = BTRFS_RBIO_READ_REBUILD; in raid56_parity_recover()
2209 rbio_add_bio(rbio, bio); in raid56_parity_recover()
2211 rbio->faila = find_logical_bio_stripe(rbio, bio); in raid56_parity_recover()
2212 if (rbio->faila == -1) { in raid56_parity_recover()
2217 __free_raid_bio(rbio); in raid56_parity_recover()
2233 rbio->failb = rbio->real_stripes - (mirror_num - 1); in raid56_parity_recover()
2234 ASSERT(rbio->failb > 0); in raid56_parity_recover()
2235 if (rbio->failb <= rbio->faila) in raid56_parity_recover()
2236 rbio->failb--; in raid56_parity_recover()
2239 if (lock_stripe_add(rbio)) in raid56_parity_recover()
2246 __raid56_parity_recover(rbio); in raid56_parity_recover()
2255 struct btrfs_raid_bio *rbio; in rmw_work() local
2257 rbio = container_of(work, struct btrfs_raid_bio, work); in rmw_work()
2258 raid56_rmw_stripe(rbio); in rmw_work()
2263 struct btrfs_raid_bio *rbio; in read_rebuild_work() local
2265 rbio = container_of(work, struct btrfs_raid_bio, work); in read_rebuild_work()
2266 __raid56_parity_recover(rbio); in read_rebuild_work()
2285 struct btrfs_raid_bio *rbio; in raid56_parity_alloc_scrub_rbio() local
2288 rbio = alloc_rbio(fs_info, bioc); in raid56_parity_alloc_scrub_rbio()
2289 if (IS_ERR(rbio)) in raid56_parity_alloc_scrub_rbio()
2291 bio_list_add(&rbio->bio_list, bio); in raid56_parity_alloc_scrub_rbio()
2297 rbio->operation = BTRFS_RBIO_PARITY_SCRUB; in raid56_parity_alloc_scrub_rbio()
2304 for (i = rbio->nr_data; i < rbio->real_stripes; i++) { in raid56_parity_alloc_scrub_rbio()
2306 rbio->scrubp = i; in raid56_parity_alloc_scrub_rbio()
2310 ASSERT(i < rbio->real_stripes); in raid56_parity_alloc_scrub_rbio()
2312 bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors); in raid56_parity_alloc_scrub_rbio()
2313 return rbio; in raid56_parity_alloc_scrub_rbio()
2317 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page, in raid56_add_scrub_pages() argument
2320 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in raid56_add_scrub_pages()
2324 ASSERT(logical >= rbio->bioc->raid_map[0]); in raid56_add_scrub_pages()
2325 ASSERT(logical + sectorsize <= rbio->bioc->raid_map[0] + in raid56_add_scrub_pages()
2326 BTRFS_STRIPE_LEN * rbio->nr_data); in raid56_add_scrub_pages()
2327 stripe_offset = (int)(logical - rbio->bioc->raid_map[0]); in raid56_add_scrub_pages()
2329 rbio->bio_sectors[index].page = page; in raid56_add_scrub_pages()
2330 rbio->bio_sectors[index].pgoff = pgoff; in raid56_add_scrub_pages()
2337 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio) in alloc_rbio_essential_pages() argument
2339 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in alloc_rbio_essential_pages()
2342 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in alloc_rbio_essential_pages()
2345 int sectornr = total_sector_nr % rbio->stripe_nsectors; in alloc_rbio_essential_pages()
2348 if (!test_bit(sectornr, &rbio->dbitmap)) in alloc_rbio_essential_pages()
2350 if (rbio->stripe_pages[index]) in alloc_rbio_essential_pages()
2355 rbio->stripe_pages[index] = page; in alloc_rbio_essential_pages()
2357 index_stripe_sectors(rbio); in alloc_rbio_essential_pages()
2361 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, in finish_parity_scrub() argument
2364 struct btrfs_io_context *bioc = rbio->bioc; in finish_parity_scrub()
2366 void **pointers = rbio->finish_pointers; in finish_parity_scrub()
2367 unsigned long *pbitmap = &rbio->finish_pbitmap; in finish_parity_scrub()
2368 int nr_data = rbio->nr_data; in finish_parity_scrub()
2381 if (rbio->real_stripes - rbio->nr_data == 1) in finish_parity_scrub()
2383 else if (rbio->real_stripes - rbio->nr_data == 2) in finish_parity_scrub()
2388 if (bioc->num_tgtdevs && bioc->tgtdev_map[rbio->scrubp]) { in finish_parity_scrub()
2390 bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors); in finish_parity_scrub()
2398 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); in finish_parity_scrub()
2419 pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page); in finish_parity_scrub()
2422 atomic_set(&rbio->error, 0); in finish_parity_scrub()
2427 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) { in finish_parity_scrub()
2433 sector = sector_in_rbio(rbio, stripe, sectornr, 0); in finish_parity_scrub()
2440 raid6_call.gen_syndrome(rbio->real_stripes, sectorsize, in finish_parity_scrub()
2449 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr); in finish_parity_scrub()
2451 if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0) in finish_parity_scrub()
2452 memcpy(parity, pointers[rbio->scrubp], sectorsize); in finish_parity_scrub()
2455 bitmap_clear(&rbio->dbitmap, sectornr, 1); in finish_parity_scrub()
2466 kunmap_local(pointers[rbio->real_stripes - 1]); in finish_parity_scrub()
2477 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) { in finish_parity_scrub()
2480 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr); in finish_parity_scrub()
2481 ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp, in finish_parity_scrub()
2490 for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) { in finish_parity_scrub()
2493 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr); in finish_parity_scrub()
2494 ret = rbio_add_io_sector(rbio, &bio_list, sector, in finish_parity_scrub()
2495 bioc->tgtdev_map[rbio->scrubp], in finish_parity_scrub()
2505 rbio_orig_end_io(rbio, BLK_STS_OK); in finish_parity_scrub()
2509 atomic_set(&rbio->stripes_pending, nr_data); in finish_parity_scrub()
2517 bio_get_trace_info(rbio, bio, &trace_info); in finish_parity_scrub()
2518 trace_raid56_scrub_write_stripe(rbio, bio, &trace_info); in finish_parity_scrub()
2525 rbio_orig_end_io(rbio, BLK_STS_IOERR); in finish_parity_scrub()
2531 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe) in is_data_stripe() argument
2533 if (stripe >= 0 && stripe < rbio->nr_data) in is_data_stripe()
2545 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio) in validate_rbio_for_parity_scrub() argument
2547 if (atomic_read(&rbio->error) > rbio->bioc->max_errors) in validate_rbio_for_parity_scrub()
2550 if (rbio->faila >= 0 || rbio->failb >= 0) { in validate_rbio_for_parity_scrub()
2553 if (is_data_stripe(rbio, rbio->faila)) in validate_rbio_for_parity_scrub()
2555 else if (is_parity_stripe(rbio->faila)) in validate_rbio_for_parity_scrub()
2556 failp = rbio->faila; in validate_rbio_for_parity_scrub()
2558 if (is_data_stripe(rbio, rbio->failb)) in validate_rbio_for_parity_scrub()
2560 else if (is_parity_stripe(rbio->failb)) in validate_rbio_for_parity_scrub()
2561 failp = rbio->failb; in validate_rbio_for_parity_scrub()
2568 if (dfail > rbio->bioc->max_errors - 1) in validate_rbio_for_parity_scrub()
2576 finish_parity_scrub(rbio, 0); in validate_rbio_for_parity_scrub()
2586 if (failp != rbio->scrubp) in validate_rbio_for_parity_scrub()
2589 __raid_recover_end_io(rbio); in validate_rbio_for_parity_scrub()
2591 finish_parity_scrub(rbio, 1); in validate_rbio_for_parity_scrub()
2596 rbio_orig_end_io(rbio, BLK_STS_IOERR); in validate_rbio_for_parity_scrub()
2609 struct btrfs_raid_bio *rbio = in raid56_parity_scrub_end_io_work() local
2616 validate_rbio_for_parity_scrub(rbio); in raid56_parity_scrub_end_io_work()
2619 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio) in raid56_parity_scrub_stripe() argument
2629 ret = alloc_rbio_essential_pages(rbio); in raid56_parity_scrub_stripe()
2633 atomic_set(&rbio->error, 0); in raid56_parity_scrub_stripe()
2635 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in raid56_parity_scrub_stripe()
2637 int sectornr = total_sector_nr % rbio->stripe_nsectors; in raid56_parity_scrub_stripe()
2638 int stripe = total_sector_nr / rbio->stripe_nsectors; in raid56_parity_scrub_stripe()
2642 if (!test_bit(sectornr, &rbio->dbitmap)) in raid56_parity_scrub_stripe()
2650 sector = sector_in_rbio(rbio, stripe, sectornr, 1); in raid56_parity_scrub_stripe()
2654 sector = rbio_stripe_sector(rbio, stripe, sectornr); in raid56_parity_scrub_stripe()
2662 ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe, in raid56_parity_scrub_stripe()
2683 atomic_set(&rbio->stripes_pending, bios_to_read); in raid56_parity_scrub_stripe()
2684 INIT_WORK(&rbio->end_io_work, raid56_parity_scrub_end_io_work); in raid56_parity_scrub_stripe()
2691 bio_get_trace_info(rbio, bio, &trace_info); in raid56_parity_scrub_stripe()
2692 trace_raid56_scrub_read(rbio, bio, &trace_info); in raid56_parity_scrub_stripe()
2700 rbio_orig_end_io(rbio, BLK_STS_IOERR); in raid56_parity_scrub_stripe()
2708 validate_rbio_for_parity_scrub(rbio); in raid56_parity_scrub_stripe()
2713 struct btrfs_raid_bio *rbio; in scrub_parity_work() local
2715 rbio = container_of(work, struct btrfs_raid_bio, work); in scrub_parity_work()
2716 raid56_parity_scrub_stripe(rbio); in scrub_parity_work()
2719 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio) in raid56_parity_submit_scrub_rbio() argument
2721 if (!lock_stripe_add(rbio)) in raid56_parity_submit_scrub_rbio()
2722 start_async_work(rbio, scrub_parity_work); in raid56_parity_submit_scrub_rbio()
2731 struct btrfs_raid_bio *rbio; in raid56_alloc_missing_rbio() local
2733 rbio = alloc_rbio(fs_info, bioc); in raid56_alloc_missing_rbio()
2734 if (IS_ERR(rbio)) in raid56_alloc_missing_rbio()
2737 rbio->operation = BTRFS_RBIO_REBUILD_MISSING; in raid56_alloc_missing_rbio()
2738 bio_list_add(&rbio->bio_list, bio); in raid56_alloc_missing_rbio()
2745 rbio->faila = find_logical_bio_stripe(rbio, bio); in raid56_alloc_missing_rbio()
2746 if (rbio->faila == -1) { in raid56_alloc_missing_rbio()
2750 __free_raid_bio(rbio); in raid56_alloc_missing_rbio()
2754 return rbio; in raid56_alloc_missing_rbio()
2757 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio) in raid56_submit_missing_rbio() argument
2759 if (!lock_stripe_add(rbio)) in raid56_submit_missing_rbio()
2760 start_async_work(rbio, read_rebuild_work); in raid56_submit_missing_rbio()