Lines Matching refs:r10_bio
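The references below all come from the md RAID10 driver and manipulate the same request descriptor. As context for reading them, here is a simplified sketch of that structure, reconstructed only from the fields visible in this listing; it is not the driver's exact layout (the authoritative definition lives in the driver's header, where some of these members overlap).

struct r10bio {                         /* simplified sketch, not the kernel definition */
	atomic_t         remaining;     /* outstanding per-device bios; the last drop finishes the I/O */
	sector_t         sector;        /* virtual (array) sector of this request */
	int              sectors;       /* length of the request in sectors */
	unsigned long    state;         /* R10BIO_Uptodate, R10BIO_WriteError, R10BIO_MadeGood, ... */
	unsigned long    start_time;    /* for bio_start_io_acct() / bio_end_io_acct() */
	struct mddev     *mddev;
	struct bio       *master_bio;   /* the original bio submitted to the array */
	int              read_slot;     /* devs[] entry servicing a read; -1 otherwise */
	struct list_head retry_list;    /* linkage onto conf->retry_list / conf->bio_end_io_list */
	struct {
		struct bio     *bio;      /* per-copy bio (reads, writes, resync) */
		struct bio     *repl_bio; /* extra bio targeting a replacement device */
		struct md_rdev *rdev;     /* rdev a read was issued to (when read_slot >= 0) */
		sector_t       addr;      /* physical sector on that device */
		int            devnum;    /* index into conf->mirrors[] */
	} devs[];                       /* one entry per copy */
};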
73 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
134 struct r10bio *r10_bio; in r10buf_pool_alloc() local
140 r10_bio = r10bio_pool_alloc(gfp_flags, conf); in r10buf_pool_alloc()
141 if (!r10_bio) in r10buf_pool_alloc()
167 r10_bio->devs[j].bio = bio; in r10buf_pool_alloc()
174 r10_bio->devs[j].repl_bio = bio; in r10buf_pool_alloc()
181 struct bio *rbio = r10_bio->devs[j].repl_bio; in r10buf_pool_alloc()
188 bio = r10_bio->devs[j].bio; in r10buf_pool_alloc()
199 rp->raid_bio = r10_bio; in r10buf_pool_alloc()
207 return r10_bio; in r10buf_pool_alloc()
216 if (r10_bio->devs[j].bio) in r10buf_pool_alloc()
217 bio_uninit(r10_bio->devs[j].bio); in r10buf_pool_alloc()
218 kfree(r10_bio->devs[j].bio); in r10buf_pool_alloc()
219 if (r10_bio->devs[j].repl_bio) in r10buf_pool_alloc()
220 bio_uninit(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
221 kfree(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
225 rbio_pool_free(r10_bio, conf); in r10buf_pool_alloc()
259 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio) in put_all_bios() argument
264 struct bio **bio = & r10_bio->devs[i].bio; in put_all_bios()
268 bio = &r10_bio->devs[i].repl_bio; in put_all_bios()
269 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio)) in put_all_bios()
275 static void free_r10bio(struct r10bio *r10_bio) in free_r10bio() argument
277 struct r10conf *conf = r10_bio->mddev->private; in free_r10bio()
279 put_all_bios(conf, r10_bio); in free_r10bio()
280 mempool_free(r10_bio, &conf->r10bio_pool); in free_r10bio()
283 static void put_buf(struct r10bio *r10_bio) in put_buf() argument
285 struct r10conf *conf = r10_bio->mddev->private; in put_buf()
287 mempool_free(r10_bio, &conf->r10buf_pool); in put_buf()
298 static void reschedule_retry(struct r10bio *r10_bio) in reschedule_retry() argument
301 struct mddev *mddev = r10_bio->mddev; in reschedule_retry()
305 list_add(&r10_bio->retry_list, &conf->retry_list); in reschedule_retry()
320 static void raid_end_bio_io(struct r10bio *r10_bio) in raid_end_bio_io() argument
322 struct bio *bio = r10_bio->master_bio; in raid_end_bio_io()
323 struct r10conf *conf = r10_bio->mddev->private; in raid_end_bio_io()
325 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) in raid_end_bio_io()
329 bio_end_io_acct(bio, r10_bio->start_time); in raid_end_bio_io()
337 free_r10bio(r10_bio); in raid_end_bio_io()
343 static inline void update_head_pos(int slot, struct r10bio *r10_bio) in update_head_pos() argument
345 struct r10conf *conf = r10_bio->mddev->private; in update_head_pos()
347 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
348 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
354 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio, in find_bio_disk() argument
361 if (r10_bio->devs[slot].bio == bio) in find_bio_disk()
363 if (r10_bio->devs[slot].repl_bio == bio) { in find_bio_disk()
369 update_head_pos(slot, r10_bio); in find_bio_disk()
375 return r10_bio->devs[slot].devnum; in find_bio_disk()
381 struct r10bio *r10_bio = bio->bi_private; in raid10_end_read_request() local
384 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_read_request()
386 slot = r10_bio->read_slot; in raid10_end_read_request()
387 rdev = r10_bio->devs[slot].rdev; in raid10_end_read_request()
391 update_head_pos(slot, r10_bio); in raid10_end_read_request()
403 set_bit(R10BIO_Uptodate, &r10_bio->state); in raid10_end_read_request()
410 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state), in raid10_end_read_request()
415 raid_end_bio_io(r10_bio); in raid10_end_read_request()
424 (unsigned long long)r10_bio->sector); in raid10_end_read_request()
425 set_bit(R10BIO_ReadError, &r10_bio->state); in raid10_end_read_request()
426 reschedule_retry(r10_bio); in raid10_end_read_request()
430 static void close_write(struct r10bio *r10_bio) in close_write() argument
433 md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector, in close_write()
434 r10_bio->sectors, in close_write()
435 !test_bit(R10BIO_Degraded, &r10_bio->state), in close_write()
437 md_write_end(r10_bio->mddev); in close_write()
440 static void one_write_done(struct r10bio *r10_bio) in one_write_done() argument
442 if (atomic_dec_and_test(&r10_bio->remaining)) { in one_write_done()
443 if (test_bit(R10BIO_WriteError, &r10_bio->state)) in one_write_done()
444 reschedule_retry(r10_bio); in one_write_done()
446 close_write(r10_bio); in one_write_done()
447 if (test_bit(R10BIO_MadeGood, &r10_bio->state)) in one_write_done()
448 reschedule_retry(r10_bio); in one_write_done()
450 raid_end_bio_io(r10_bio); in one_write_done()
457 struct r10bio *r10_bio = bio->bi_private; in raid10_end_write_request() local
460 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_write_request()
468 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in raid10_end_write_request()
503 set_bit(R10BIO_WriteError, &r10_bio->state); in raid10_end_write_request()
506 set_bit(R10BIO_Degraded, &r10_bio->state); in raid10_end_write_request()
507 r10_bio->devs[slot].bio = NULL; in raid10_end_write_request()
535 set_bit(R10BIO_Uptodate, &r10_bio->state); in raid10_end_write_request()
539 r10_bio->devs[slot].addr, in raid10_end_write_request()
540 r10_bio->sectors, in raid10_end_write_request()
544 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; in raid10_end_write_request()
546 r10_bio->devs[slot].bio = IO_MADE_GOOD; in raid10_end_write_request()
548 set_bit(R10BIO_MadeGood, &r10_bio->state); in raid10_end_write_request()
557 one_write_done(r10_bio); in raid10_end_write_request()
732 struct r10bio *r10_bio, in read_balance() argument
735 const sector_t this_sector = r10_bio->sector; in read_balance()
737 int sectors = r10_bio->sectors; in read_balance()
747 raid10_find_phys(conf, r10_bio); in read_balance()
756 clear_bit(R10BIO_FailFast, &r10_bio->state); in read_balance()
777 if (r10_bio->devs[slot].bio == IO_BLOCKED) in read_balance()
779 disk = r10_bio->devs[slot].devnum; in read_balance()
782 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
788 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
791 dev_sector = r10_bio->devs[slot].addr; in read_balance()
837 set_bit(R10BIO_FailFast, &r10_bio->state); in read_balance()
847 new_distance = r10_bio->devs[slot].addr; in read_balance()
849 new_distance = abs(r10_bio->devs[slot].addr - in read_balance()
870 r10_bio->read_slot = slot; in read_balance()
1093 static sector_t choose_data_offset(struct r10bio *r10_bio, in choose_data_offset() argument
1097 test_bit(R10BIO_Previous, &r10_bio->state)) in choose_data_offset()
1176 struct r10bio *r10_bio) in raid10_read_request() argument
1185 int slot = r10_bio->read_slot; in raid10_read_request()
1189 if (slot >= 0 && r10_bio->devs[slot].rdev) { in raid10_read_request()
1205 disk = r10_bio->devs[slot].devnum; in raid10_read_request()
1212 err_rdev = r10_bio->devs[slot].rdev; in raid10_read_request()
1217 if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) in raid10_read_request()
1219 rdev = read_balance(conf, r10_bio, &max_sectors); in raid10_read_request()
1224 (unsigned long long)r10_bio->sector); in raid10_read_request()
1226 raid_end_bio_io(r10_bio); in raid10_read_request()
1233 (unsigned long long)r10_bio->sector); in raid10_read_request()
1242 r10_bio->master_bio = bio; in raid10_read_request()
1243 r10_bio->sectors = max_sectors; in raid10_read_request()
1245 slot = r10_bio->read_slot; in raid10_read_request()
1248 r10_bio->start_time = bio_start_io_acct(bio); in raid10_read_request()
1251 r10_bio->devs[slot].bio = read_bio; in raid10_read_request()
1252 r10_bio->devs[slot].rdev = rdev; in raid10_read_request()
1254 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + in raid10_read_request()
1255 choose_data_offset(r10_bio, rdev); in raid10_read_request()
1259 test_bit(R10BIO_FailFast, &r10_bio->state)) in raid10_read_request()
1261 read_bio->bi_private = r10_bio; in raid10_read_request()
1265 r10_bio->sector); in raid10_read_request()
1270 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, in raid10_write_one_disk() argument
1282 int devnum = r10_bio->devs[n_copy].devnum; in raid10_write_one_disk()
1297 r10_bio->devs[n_copy].repl_bio = mbio; in raid10_write_one_disk()
1299 r10_bio->devs[n_copy].bio = mbio; in raid10_write_one_disk()
1301 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr + in raid10_write_one_disk()
1302 choose_data_offset(r10_bio, rdev)); in raid10_write_one_disk()
1309 mbio->bi_private = r10_bio; in raid10_write_one_disk()
1313 r10_bio->sector); in raid10_write_one_disk()
1317 atomic_inc(&r10_bio->remaining); in raid10_write_one_disk()
1334 static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio) in wait_blocked_dev() argument
1362 sector_t dev_sector = r10_bio->devs[i].addr; in wait_blocked_dev()
1370 if (!r10_bio->sectors) in wait_blocked_dev()
1373 is_bad = is_badblock(rdev, dev_sector, r10_bio->sectors, in wait_blocked_dev()
1401 struct r10bio *r10_bio) in raid10_write_request() argument
1429 sectors = r10_bio->sectors; in raid10_write_request()
1465 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */ in raid10_write_request()
1466 raid10_find_phys(conf, r10_bio); in raid10_write_request()
1468 wait_blocked_dev(mddev, r10_bio); in raid10_write_request()
1471 max_sectors = r10_bio->sectors; in raid10_write_request()
1474 int d = r10_bio->devs[i].devnum; in raid10_write_request()
1485 r10_bio->devs[i].bio = NULL; in raid10_write_request()
1486 r10_bio->devs[i].repl_bio = NULL; in raid10_write_request()
1489 set_bit(R10BIO_Degraded, &r10_bio->state); in raid10_write_request()
1494 sector_t dev_sector = r10_bio->devs[i].addr; in raid10_write_request()
1525 r10_bio->devs[i].bio = bio; in raid10_write_request()
1529 r10_bio->devs[i].repl_bio = bio; in raid10_write_request()
1535 if (max_sectors < r10_bio->sectors) in raid10_write_request()
1536 r10_bio->sectors = max_sectors; in raid10_write_request()
1538 if (r10_bio->sectors < bio_sectors(bio)) { in raid10_write_request()
1539 struct bio *split = bio_split(bio, r10_bio->sectors, in raid10_write_request()
1546 r10_bio->master_bio = bio; in raid10_write_request()
1550 r10_bio->start_time = bio_start_io_acct(bio); in raid10_write_request()
1551 atomic_set(&r10_bio->remaining, 1); in raid10_write_request()
1552 md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); in raid10_write_request()
1555 if (r10_bio->devs[i].bio) in raid10_write_request()
1556 raid10_write_one_disk(mddev, r10_bio, bio, false, i); in raid10_write_request()
1557 if (r10_bio->devs[i].repl_bio) in raid10_write_request()
1558 raid10_write_one_disk(mddev, r10_bio, bio, true, i); in raid10_write_request()
1560 one_write_done(r10_bio); in raid10_write_request()
1566 struct r10bio *r10_bio; in __make_request() local
1568 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO); in __make_request()
1570 r10_bio->master_bio = bio; in __make_request()
1571 r10_bio->sectors = sectors; in __make_request()
1573 r10_bio->mddev = mddev; in __make_request()
1574 r10_bio->sector = bio->bi_iter.bi_sector; in __make_request()
1575 r10_bio->state = 0; in __make_request()
1576 r10_bio->read_slot = -1; in __make_request()
1577 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * in __make_request()
1581 raid10_read_request(mddev, bio, r10_bio); in __make_request()
1583 raid10_write_request(mddev, bio, r10_bio); in __make_request()
1610 struct r10bio *r10_bio = bio->bi_private; in raid10_end_discard_request() local
1611 struct r10conf *conf = r10_bio->mddev->private; in raid10_end_discard_request()
1619 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) in raid10_end_discard_request()
1620 set_bit(R10BIO_Uptodate, &r10_bio->state); in raid10_end_discard_request()
1622 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in raid10_end_discard_request()
1635 raid_end_discard_bio(r10_bio); in raid10_end_discard_request()
1651 struct r10bio *r10_bio, *first_r10bio; in raid10_handle_discard() local
1756 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO); in raid10_handle_discard()
1757 r10_bio->mddev = mddev; in raid10_handle_discard()
1758 r10_bio->state = 0; in raid10_handle_discard()
1759 r10_bio->sectors = 0; in raid10_handle_discard()
1760 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks); in raid10_handle_discard()
1761 wait_blocked_dev(mddev, r10_bio); in raid10_handle_discard()
1771 r10_bio->master_bio = bio; in raid10_handle_discard()
1772 set_bit(R10BIO_Discard, &r10_bio->state); in raid10_handle_discard()
1774 first_r10bio = r10_bio; in raid10_handle_discard()
1776 r10_bio->master_bio = (struct bio *)first_r10bio; in raid10_handle_discard()
1789 r10_bio->devs[disk].bio = NULL; in raid10_handle_discard()
1790 r10_bio->devs[disk].repl_bio = NULL; in raid10_handle_discard()
1800 r10_bio->devs[disk].bio = bio; in raid10_handle_discard()
1804 r10_bio->devs[disk].repl_bio = bio; in raid10_handle_discard()
1810 atomic_set(&r10_bio->remaining, 1); in raid10_handle_discard()
1847 if (r10_bio->devs[disk].bio) { in raid10_handle_discard()
1852 mbio->bi_private = r10_bio; in raid10_handle_discard()
1853 r10_bio->devs[disk].bio = mbio; in raid10_handle_discard()
1854 r10_bio->devs[disk].devnum = disk; in raid10_handle_discard()
1855 atomic_inc(&r10_bio->remaining); in raid10_handle_discard()
1857 dev_start + choose_data_offset(r10_bio, rdev), in raid10_handle_discard()
1861 if (r10_bio->devs[disk].repl_bio) { in raid10_handle_discard()
1866 rbio->bi_private = r10_bio; in raid10_handle_discard()
1867 r10_bio->devs[disk].repl_bio = rbio; in raid10_handle_discard()
1868 r10_bio->devs[disk].devnum = disk; in raid10_handle_discard()
1869 atomic_inc(&r10_bio->remaining); in raid10_handle_discard()
1871 dev_start + choose_data_offset(r10_bio, rrdev), in raid10_handle_discard()
1883 raid_end_discard_bio(r10_bio); in raid10_handle_discard()
1888 raid_end_discard_bio(r10_bio); in raid10_handle_discard()
2269 static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d) in __end_sync_read() argument
2271 struct r10conf *conf = r10_bio->mddev->private; in __end_sync_read()
2274 set_bit(R10BIO_Uptodate, &r10_bio->state); in __end_sync_read()
2279 atomic_add(r10_bio->sectors, in __end_sync_read()
2286 if (test_bit(R10BIO_IsRecover, &r10_bio->state) || in __end_sync_read()
2287 atomic_dec_and_test(&r10_bio->remaining)) { in __end_sync_read()
2291 reschedule_retry(r10_bio); in __end_sync_read()
2297 struct r10bio *r10_bio = get_resync_r10bio(bio); in end_sync_read() local
2298 struct r10conf *conf = r10_bio->mddev->private; in end_sync_read()
2299 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL); in end_sync_read()
2301 __end_sync_read(r10_bio, bio, d); in end_sync_read()
2307 struct r10bio *r10_bio = bio->bi_private; in end_reshape_read() local
2309 __end_sync_read(r10_bio, bio, r10_bio->read_slot); in end_reshape_read()
2312 static void end_sync_request(struct r10bio *r10_bio) in end_sync_request() argument
2314 struct mddev *mddev = r10_bio->mddev; in end_sync_request()
2316 while (atomic_dec_and_test(&r10_bio->remaining)) { in end_sync_request()
2317 if (r10_bio->master_bio == NULL) { in end_sync_request()
2319 sector_t s = r10_bio->sectors; in end_sync_request()
2320 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || in end_sync_request()
2321 test_bit(R10BIO_WriteError, &r10_bio->state)) in end_sync_request()
2322 reschedule_retry(r10_bio); in end_sync_request()
2324 put_buf(r10_bio); in end_sync_request()
2328 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio; in end_sync_request()
2329 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || in end_sync_request()
2330 test_bit(R10BIO_WriteError, &r10_bio->state)) in end_sync_request()
2331 reschedule_retry(r10_bio); in end_sync_request()
2333 put_buf(r10_bio); in end_sync_request()
2334 r10_bio = r10_bio2; in end_sync_request()
2341 struct r10bio *r10_bio = get_resync_r10bio(bio); in end_sync_write() local
2342 struct mddev *mddev = r10_bio->mddev; in end_sync_write()
2351 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_sync_write()
2365 set_bit(R10BIO_WriteError, &r10_bio->state); in end_sync_write()
2368 r10_bio->devs[slot].addr, in end_sync_write()
2369 r10_bio->sectors, in end_sync_write()
2371 set_bit(R10BIO_MadeGood, &r10_bio->state); in end_sync_write()
2375 end_sync_request(r10_bio); in end_sync_write()
2394 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) in sync_request_write() argument
2402 atomic_set(&r10_bio->remaining, 1); in sync_request_write()
2406 if (!r10_bio->devs[i].bio->bi_status) in sync_request_write()
2413 fbio = r10_bio->devs[i].bio; in sync_request_write()
2414 fbio->bi_iter.bi_size = r10_bio->sectors << 9; in sync_request_write()
2418 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); in sync_request_write()
2425 tbio = r10_bio->devs[i].bio; in sync_request_write()
2433 d = r10_bio->devs[i].devnum; in sync_request_write()
2435 if (!r10_bio->devs[i].bio->bi_status) { in sync_request_write()
2440 int sectors = r10_bio->sectors; in sync_request_write()
2453 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches); in sync_request_write()
2472 rp->raid_bio = r10_bio; in sync_request_write()
2474 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; in sync_request_write()
2480 atomic_inc(&r10_bio->remaining); in sync_request_write()
2495 tbio = r10_bio->devs[i].repl_bio; in sync_request_write()
2498 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write in sync_request_write()
2499 && r10_bio->devs[i].bio != fbio) in sync_request_write()
2501 d = r10_bio->devs[i].devnum; in sync_request_write()
2502 atomic_inc(&r10_bio->remaining); in sync_request_write()
2509 if (atomic_dec_and_test(&r10_bio->remaining)) { in sync_request_write()
2510 md_done_sync(mddev, r10_bio->sectors, 1); in sync_request_write()
2511 put_buf(r10_bio); in sync_request_write()
2525 static void fix_recovery_read_error(struct r10bio *r10_bio) in fix_recovery_read_error() argument
2534 struct mddev *mddev = r10_bio->mddev; in fix_recovery_read_error()
2536 struct bio *bio = r10_bio->devs[0].bio; in fix_recovery_read_error()
2538 int sectors = r10_bio->sectors; in fix_recovery_read_error()
2540 int dr = r10_bio->devs[0].devnum; in fix_recovery_read_error()
2541 int dw = r10_bio->devs[1].devnum; in fix_recovery_read_error()
2554 addr = r10_bio->devs[0].addr + sect, in fix_recovery_read_error()
2562 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2586 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2608 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio) in recovery_request_write() argument
2614 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) { in recovery_request_write()
2615 fix_recovery_read_error(r10_bio); in recovery_request_write()
2616 end_sync_request(r10_bio); in recovery_request_write()
2624 d = r10_bio->devs[1].devnum; in recovery_request_write()
2625 wbio = r10_bio->devs[1].bio; in recovery_request_write()
2626 wbio2 = r10_bio->devs[1].repl_bio; in recovery_request_write()
2714 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio) in fix_read_error() argument
2717 int sectors = r10_bio->sectors; in fix_read_error()
2720 int d = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2741 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED; in fix_read_error()
2747 int sl = r10_bio->read_slot; in fix_read_error()
2759 d = r10_bio->devs[sl].devnum; in fix_read_error()
2764 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s, in fix_read_error()
2769 r10_bio->devs[sl].addr + in fix_read_error()
2782 } while (!success && sl != r10_bio->read_slot); in fix_read_error()
2790 int dn = r10_bio->devs[r10_bio->read_slot].devnum; in fix_read_error()
2795 r10_bio->devs[r10_bio->read_slot].addr in fix_read_error()
2799 r10_bio->devs[r10_bio->read_slot].bio in fix_read_error()
2808 while (sl != r10_bio->read_slot) { in fix_read_error()
2812 d = r10_bio->devs[sl].devnum; in fix_read_error()
2822 r10_bio->devs[sl].addr + in fix_read_error()
2831 choose_data_offset(r10_bio, in fix_read_error()
2842 while (sl != r10_bio->read_slot) { in fix_read_error()
2846 d = r10_bio->devs[sl].devnum; in fix_read_error()
2856 r10_bio->devs[sl].addr + in fix_read_error()
2865 choose_data_offset(r10_bio, rdev)), in fix_read_error()
2876 choose_data_offset(r10_bio, rdev)), in fix_read_error()
2891 static int narrow_write_error(struct r10bio *r10_bio, int i) in narrow_write_error() argument
2893 struct bio *bio = r10_bio->master_bio; in narrow_write_error()
2894 struct mddev *mddev = r10_bio->mddev; in narrow_write_error()
2896 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2911 int sect_to_write = r10_bio->sectors; in narrow_write_error()
2919 sector = r10_bio->sector; in narrow_write_error()
2920 sectors = ((r10_bio->sector + block_sectors) in narrow_write_error()
2933 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); in narrow_write_error()
2935 choose_data_offset(r10_bio, rdev); in narrow_write_error()
2952 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio) in handle_read_error() argument
2954 int slot = r10_bio->read_slot; in handle_read_error()
2957 struct md_rdev *rdev = r10_bio->devs[slot].rdev; in handle_read_error()
2967 bio = r10_bio->devs[slot].bio; in handle_read_error()
2969 r10_bio->devs[slot].bio = NULL; in handle_read_error()
2972 r10_bio->devs[slot].bio = IO_BLOCKED; in handle_read_error()
2975 fix_read_error(conf, mddev, r10_bio); in handle_read_error()
2982 r10_bio->state = 0; in handle_read_error()
2983 raid10_read_request(mddev, r10_bio->master_bio, r10_bio); in handle_read_error()
2986 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) in handle_write_completed() argument
2997 if (test_bit(R10BIO_IsSync, &r10_bio->state) || in handle_write_completed()
2998 test_bit(R10BIO_IsRecover, &r10_bio->state)) { in handle_write_completed()
3000 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
3002 if (r10_bio->devs[m].bio == NULL || in handle_write_completed()
3003 r10_bio->devs[m].bio->bi_end_io == NULL) in handle_write_completed()
3005 if (!r10_bio->devs[m].bio->bi_status) { in handle_write_completed()
3008 r10_bio->devs[m].addr, in handle_write_completed()
3009 r10_bio->sectors, 0); in handle_write_completed()
3013 r10_bio->devs[m].addr, in handle_write_completed()
3014 r10_bio->sectors, 0)) in handle_write_completed()
3018 if (r10_bio->devs[m].repl_bio == NULL || in handle_write_completed()
3019 r10_bio->devs[m].repl_bio->bi_end_io == NULL) in handle_write_completed()
3022 if (!r10_bio->devs[m].repl_bio->bi_status) { in handle_write_completed()
3025 r10_bio->devs[m].addr, in handle_write_completed()
3026 r10_bio->sectors, 0); in handle_write_completed()
3030 r10_bio->devs[m].addr, in handle_write_completed()
3031 r10_bio->sectors, 0)) in handle_write_completed()
3035 put_buf(r10_bio); in handle_write_completed()
3039 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
3040 struct bio *bio = r10_bio->devs[m].bio; in handle_write_completed()
3045 r10_bio->devs[m].addr, in handle_write_completed()
3046 r10_bio->sectors, 0); in handle_write_completed()
3050 if (!narrow_write_error(r10_bio, m)) { in handle_write_completed()
3053 &r10_bio->state); in handle_write_completed()
3057 bio = r10_bio->devs[m].repl_bio; in handle_write_completed()
3062 r10_bio->devs[m].addr, in handle_write_completed()
3063 r10_bio->sectors, 0); in handle_write_completed()
3069 list_add(&r10_bio->retry_list, &conf->bio_end_io_list); in handle_write_completed()
3080 &r10_bio->state)) in handle_write_completed()
3081 close_write(r10_bio); in handle_write_completed()
3082 raid_end_bio_io(r10_bio); in handle_write_completed()
3090 struct r10bio *r10_bio; in raid10d() local
3110 r10_bio = list_first_entry(&tmp, struct r10bio, in raid10d()
3112 list_del(&r10_bio->retry_list); in raid10d()
3114 set_bit(R10BIO_Degraded, &r10_bio->state); in raid10d()
3117 &r10_bio->state)) in raid10d()
3118 close_write(r10_bio); in raid10d()
3119 raid_end_bio_io(r10_bio); in raid10d()
3133 r10_bio = list_entry(head->prev, struct r10bio, retry_list); in raid10d()
3138 mddev = r10_bio->mddev; in raid10d()
3140 if (test_bit(R10BIO_MadeGood, &r10_bio->state) || in raid10d()
3141 test_bit(R10BIO_WriteError, &r10_bio->state)) in raid10d()
3142 handle_write_completed(conf, r10_bio); in raid10d()
3143 else if (test_bit(R10BIO_IsReshape, &r10_bio->state)) in raid10d()
3144 reshape_request_write(mddev, r10_bio); in raid10d()
3145 else if (test_bit(R10BIO_IsSync, &r10_bio->state)) in raid10d()
3146 sync_request_write(mddev, r10_bio); in raid10d()
3147 else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) in raid10d()
3148 recovery_request_write(mddev, r10_bio); in raid10d()
3149 else if (test_bit(R10BIO_ReadError, &r10_bio->state)) in raid10d()
3150 handle_read_error(mddev, r10_bio); in raid10d()
3281 struct r10bio *r10_bio; in raid10_sync_request() local
3417 r10_bio = NULL; in raid10_sync_request()
3449 rb2 = r10_bio; in raid10_sync_request()
3483 r10_bio = raid10_alloc_init_r10buf(conf); in raid10_sync_request()
3484 r10_bio->state = 0; in raid10_sync_request()
3486 atomic_set(&r10_bio->remaining, 0); in raid10_sync_request()
3488 r10_bio->master_bio = (struct bio*)rb2; in raid10_sync_request()
3491 r10_bio->mddev = mddev; in raid10_sync_request()
3492 set_bit(R10BIO_IsRecover, &r10_bio->state); in raid10_sync_request()
3493 r10_bio->sector = sect; in raid10_sync_request()
3495 raid10_find_phys(conf, r10_bio); in raid10_sync_request()
3516 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3527 sector = r10_bio->devs[j].addr; in raid10_sync_request()
3541 bio = r10_bio->devs[0].bio; in raid10_sync_request()
3548 from_addr = r10_bio->devs[j].addr; in raid10_sync_request()
3556 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3559 to_addr = r10_bio->devs[k].addr; in raid10_sync_request()
3560 r10_bio->devs[0].devnum = d; in raid10_sync_request()
3561 r10_bio->devs[0].addr = from_addr; in raid10_sync_request()
3562 r10_bio->devs[1].devnum = i; in raid10_sync_request()
3563 r10_bio->devs[1].addr = to_addr; in raid10_sync_request()
3566 bio = r10_bio->devs[1].bio; in raid10_sync_request()
3574 atomic_inc(&r10_bio->remaining); in raid10_sync_request()
3576 r10_bio->devs[1].bio->bi_end_io = NULL; in raid10_sync_request()
3579 bio = r10_bio->devs[1].repl_bio; in raid10_sync_request()
3595 atomic_inc(&r10_bio->remaining); in raid10_sync_request()
3608 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3614 r10_bio->devs[k].addr, in raid10_sync_request()
3620 r10_bio->devs[k].addr, in raid10_sync_request()
3632 put_buf(r10_bio); in raid10_sync_request()
3635 r10_bio = rb2; in raid10_sync_request()
3644 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) { in raid10_sync_request()
3651 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3658 r10_bio->devs[0].bio->bi_opf in raid10_sync_request()
3663 while (r10_bio) { in raid10_sync_request()
3664 struct r10bio *rb2 = r10_bio; in raid10_sync_request()
3665 r10_bio = (struct r10bio*) rb2->master_bio; in raid10_sync_request()
3696 r10_bio = raid10_alloc_init_r10buf(conf); in raid10_sync_request()
3697 r10_bio->state = 0; in raid10_sync_request()
3699 r10_bio->mddev = mddev; in raid10_sync_request()
3700 atomic_set(&r10_bio->remaining, 0); in raid10_sync_request()
3704 r10_bio->master_bio = NULL; in raid10_sync_request()
3705 r10_bio->sector = sector_nr; in raid10_sync_request()
3706 set_bit(R10BIO_IsSync, &r10_bio->state); in raid10_sync_request()
3707 raid10_find_phys(conf, r10_bio); in raid10_sync_request()
3708 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1; in raid10_sync_request()
3711 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3716 if (r10_bio->devs[i].repl_bio) in raid10_sync_request()
3717 r10_bio->devs[i].repl_bio->bi_end_io = NULL; in raid10_sync_request()
3719 bio = r10_bio->devs[i].bio; in raid10_sync_request()
3727 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3741 atomic_inc(&r10_bio->remaining); in raid10_sync_request()
3760 bio = r10_bio->devs[i].repl_bio; in raid10_sync_request()
3763 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3778 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3779 if (r10_bio->devs[i].bio->bi_end_io) in raid10_sync_request()
3782 if (r10_bio->devs[i].repl_bio && in raid10_sync_request()
3783 r10_bio->devs[i].repl_bio->bi_end_io) in raid10_sync_request()
3788 put_buf(r10_bio); in raid10_sync_request()
3816 r10_bio->sectors = nr_sectors; in raid10_sync_request()
3869 r10_bio = get_resync_r10bio(bio); in raid10_sync_request()
3870 r10_bio->sectors = nr_sectors; in raid10_sync_request()
4787 struct r10bio *r10_bio; in reshape_request() local
4892 r10_bio = raid10_alloc_init_r10buf(conf); in reshape_request()
4893 r10_bio->state = 0; in reshape_request()
4895 atomic_set(&r10_bio->remaining, 0); in reshape_request()
4896 r10_bio->mddev = mddev; in reshape_request()
4897 r10_bio->sector = sector_nr; in reshape_request()
4898 set_bit(R10BIO_IsReshape, &r10_bio->state); in reshape_request()
4899 r10_bio->sectors = last - sector_nr + 1; in reshape_request()
4900 rdev = read_balance(conf, r10_bio, &max_sectors); in reshape_request()
4901 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state)); in reshape_request()
4908 mempool_free(r10_bio, &conf->r10buf_pool); in reshape_request()
4915 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr in reshape_request()
4917 read_bio->bi_private = r10_bio; in reshape_request()
4919 r10_bio->master_bio = read_bio; in reshape_request()
4920 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; in reshape_request()
4949 __raid10_find_phys(&conf->geo, r10_bio); in reshape_request()
4957 int d = r10_bio->devs[s/2].devnum; in reshape_request()
4961 b = r10_bio->devs[s/2].repl_bio; in reshape_request()
4964 b = r10_bio->devs[s/2].bio; in reshape_request()
4970 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + in reshape_request()
4981 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in reshape_request()
4998 r10_bio->sectors = nr_sectors; in reshape_request()
5001 md_sync_acct_bio(read_bio, r10_bio->sectors); in reshape_request()
5002 atomic_inc(&r10_bio->remaining); in reshape_request()
5022 static void end_reshape_request(struct r10bio *r10_bio);
5024 struct r10bio *r10_bio);
5025 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio) in reshape_request_write() argument
5035 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) in reshape_request_write()
5036 if (handle_reshape_read_error(mddev, r10_bio) < 0) { in reshape_request_write()
5038 md_done_sync(mddev, r10_bio->sectors, 0); in reshape_request_write()
5045 atomic_set(&r10_bio->remaining, 1); in reshape_request_write()
5048 int d = r10_bio->devs[s/2].devnum; in reshape_request_write()
5053 b = r10_bio->devs[s/2].repl_bio; in reshape_request_write()
5056 b = r10_bio->devs[s/2].bio; in reshape_request_write()
5064 md_sync_acct_bio(b, r10_bio->sectors); in reshape_request_write()
5065 atomic_inc(&r10_bio->remaining); in reshape_request_write()
5069 end_reshape_request(r10_bio); in reshape_request_write()
5104 struct r10bio *r10_bio) in handle_reshape_read_error() argument
5107 int sectors = r10_bio->sectors; in handle_reshape_read_error()
5121 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in handle_reshape_read_error()
5123 r10b->sector = r10_bio->sector; in handle_reshape_read_error()
5180 struct r10bio *r10_bio = get_resync_r10bio(bio); in end_reshape_write() local
5181 struct mddev *mddev = r10_bio->mddev; in end_reshape_write()
5188 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl); in end_reshape_write()
5202 end_reshape_request(r10_bio); in end_reshape_write()
5205 static void end_reshape_request(struct r10bio *r10_bio) in end_reshape_request() argument
5207 if (!atomic_dec_and_test(&r10_bio->remaining)) in end_reshape_request()
5209 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1); in end_reshape_request()
5210 bio_put(r10_bio->master_bio); in end_reshape_request()
5211 put_buf(r10_bio); in end_reshape_request()
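A pattern that recurs throughout the listing (raid10_write_request, raid10_handle_discard, sync_request_write, reshape_request) is the use of the remaining counter to fan one request out to several devices and complete it exactly once: the submitter sets it to 1, increments it for every sub-bio issued, and both the submitter and each completion handler drop a reference, with only the final drop ending the master bio. The following is a minimal userspace sketch of that idiom under hypothetical names; it illustrates the counting scheme, not the driver's code.

/* Illustration of the r10_bio->remaining idiom; all names here are hypothetical. */
#include <stdatomic.h>
#include <stdio.h>

struct request {
	atomic_int remaining;   /* 1 bias for the submitter + 1 per issued sub-I/O */
	int        copies;
};

static void one_done(struct request *r)
{
	/* Mirrors one_write_done(): only the drop that reaches zero completes the request. */
	if (atomic_fetch_sub(&r->remaining, 1) == 1)
		printf("request complete\n");
}

static void submit(struct request *r)
{
	atomic_store(&r->remaining, 1);             /* cf. atomic_set(&r10_bio->remaining, 1) */
	for (int i = 0; i < r->copies; i++) {
		atomic_fetch_add(&r->remaining, 1); /* cf. atomic_inc() before issuing each bio */
		one_done(r);                        /* the endio handler would call this asynchronously */
	}
	one_done(r);                                /* drop the submitter's bias, cf. the final one_write_done() */
}

int main(void)
{
	struct request r = { .copies = 3 };
	atomic_init(&r.remaining, 0);
	submit(&r);
	return 0;
}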