Lines matching refs:sh in drivers/md/raid5.c (identifier cross-reference; each entry is: <line> <source text> in <function>(), followed by the identifier's role there, "argument" or "local")

133 static inline int raid6_d0(struct stripe_head *sh)  in raid6_d0()  argument
135 if (sh->ddf_layout) in raid6_d0()
139 if (sh->qd_idx == sh->disks - 1) in raid6_d0()
142 return sh->qd_idx + 1; in raid6_d0()
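The fragments above cover most of raid6_d0(), which returns the index of the first data disk in a RAID-6 stripe. As a reading aid, here is the whole function reconstructed from these fragments and upstream raid5.c of this vintage (roughly v3.3-v3.7); treat it, and the later sketches in this listing, as approximations, since the exact tree the cross-reference was generated from may differ:

static inline int raid6_d0(struct stripe_head *sh)
{
        if (sh->ddf_layout)
                /* ddf always starts from the first device */
                return 0;
        /* md starts just after the Q block */
        if (sh->qd_idx == sh->disks - 1)
                return 0;
        else
                return sh->qd_idx + 1;
}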
155 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, in raid6_idx_to_slot() argument
160 if (sh->ddf_layout) in raid6_idx_to_slot()
162 if (idx == sh->pd_idx) in raid6_idx_to_slot()
164 if (idx == sh->qd_idx) in raid6_idx_to_slot()
166 if (!sh->ddf_layout) in raid6_idx_to_slot()
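raid6_idx_to_slot() maps a physical disk index to a syndrome slot: the MD layouts pin P and Q to the last two slots, while DDF layouts keep parity in place. Reconstructed on the same basis:

static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
                             int *count, int syndrome_disks)
{
        int slot = *count;

        if (sh->ddf_layout)
                (*count)++;
        if (idx == sh->pd_idx)
                return syndrome_disks;
        if (idx == sh->qd_idx)
                return syndrome_disks + 1;
        if (!sh->ddf_layout)
                (*count)++;
        return slot;
}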
186 static int stripe_operations_active(struct stripe_head *sh) in stripe_operations_active() argument
188 return sh->check_state || sh->reconstruct_state || in stripe_operations_active()
189 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || in stripe_operations_active()
190 test_bit(STRIPE_COMPUTE_RUN, &sh->state); in stripe_operations_active()
193 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) in __release_stripe() argument
195 if (atomic_dec_and_test(&sh->count)) { in __release_stripe()
196 BUG_ON(!list_empty(&sh->lru)); in __release_stripe()
198 if (test_bit(STRIPE_HANDLE, &sh->state)) { in __release_stripe()
199 if (test_bit(STRIPE_DELAYED, &sh->state) && in __release_stripe()
200 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in __release_stripe()
201 list_add_tail(&sh->lru, &conf->delayed_list); in __release_stripe()
202 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && in __release_stripe()
203 sh->bm_seq - conf->seq_write > 0) in __release_stripe()
204 list_add_tail(&sh->lru, &conf->bitmap_list); in __release_stripe()
206 clear_bit(STRIPE_DELAYED, &sh->state); in __release_stripe()
207 clear_bit(STRIPE_BIT_DELAY, &sh->state); in __release_stripe()
208 list_add_tail(&sh->lru, &conf->handle_list); in __release_stripe()
212 BUG_ON(stripe_operations_active(sh)); in __release_stripe()
213 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in __release_stripe()
218 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { in __release_stripe()
219 list_add_tail(&sh->lru, &conf->inactive_list); in __release_stripe()
228 static void release_stripe(struct stripe_head *sh) in release_stripe() argument
230 struct r5conf *conf = sh->raid_conf; in release_stripe()
234 __release_stripe(conf, sh); in release_stripe()
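The __release_stripe() fragments above show where a stripe goes when its last reference drops: handle_list if it still needs work, delayed_list or bitmap_list if that work is deferred, inactive_list once it is idle. release_stripe() itself is just a locked wrapper; a sketch, same caveats as before:

static void release_stripe(struct stripe_head *sh)
{
        struct r5conf *conf = sh->raid_conf;
        unsigned long flags;

        spin_lock_irqsave(&conf->device_lock, flags);
        __release_stripe(conf, sh);
        spin_unlock_irqrestore(&conf->device_lock, flags);
}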
238 static inline void remove_hash(struct stripe_head *sh) in remove_hash() argument
241 (unsigned long long)sh->sector); in remove_hash()
243 hlist_del_init(&sh->hash); in remove_hash()
246 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) in insert_hash() argument
248 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
251 (unsigned long long)sh->sector); in insert_hash()
253 hlist_add_head(&sh->hash, hp); in insert_hash()
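Stripe heads are kept in a hash table keyed by sector so in-flight stripes can be found and reused; these two helpers are the whole mechanism. Reconstructed:

static inline void remove_hash(struct stripe_head *sh)
{
        pr_debug("remove_hash(), stripe %llu\n",
                (unsigned long long)sh->sector);

        hlist_del_init(&sh->hash);
}

static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
{
        struct hlist_head *hp = stripe_hash(conf, sh->sector);

        pr_debug("insert_hash(), stripe %llu\n",
                (unsigned long long)sh->sector);

        hlist_add_head(&sh->hash, hp);
}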
260 struct stripe_head *sh = NULL; in get_free_stripe() local
266 sh = list_entry(first, struct stripe_head, lru); in get_free_stripe()
268 remove_hash(sh); in get_free_stripe()
271 return sh; in get_free_stripe()
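get_free_stripe() pops the head of conf->inactive_list and unhashes it; the caller must hold conf->device_lock. A sketch:

static struct stripe_head *get_free_stripe(struct r5conf *conf)
{
        struct stripe_head *sh = NULL;
        struct list_head *first;

        if (list_empty(&conf->inactive_list))
                goto out;
        first = conf->inactive_list.next;
        sh = list_entry(first, struct stripe_head, lru);
        list_del_init(first);
        remove_hash(sh);
        atomic_inc(&conf->active_stripes);
out:
        return sh;
}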
274 static void shrink_buffers(struct stripe_head *sh) in shrink_buffers() argument
278 int num = sh->raid_conf->pool_size; in shrink_buffers()
281 p = sh->dev[i].page; in shrink_buffers()
284 sh->dev[i].page = NULL; in shrink_buffers()
289 static int grow_buffers(struct stripe_head *sh) in grow_buffers() argument
292 int num = sh->raid_conf->pool_size; in grow_buffers()
300 sh->dev[i].page = page; in grow_buffers()
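Each stripe_head owns one page per device, with the count taken from conf->pool_size; grow_buffers() and shrink_buffers() allocate and free that page array. Reconstructed:

static void shrink_buffers(struct stripe_head *sh)
{
        struct page *p;
        int i;
        int num = sh->raid_conf->pool_size;

        for (i = 0; i < num; i++) {
                p = sh->dev[i].page;
                if (!p)
                        continue;
                sh->dev[i].page = NULL;
                put_page(p);
        }
}

static int grow_buffers(struct stripe_head *sh)
{
        int i;
        int num = sh->raid_conf->pool_size;

        for (i = 0; i < num; i++) {
                struct page *page;

                if (!(page = alloc_page(GFP_KERNEL)))
                        return 1;
                sh->dev[i].page = page;
        }
        return 0;
}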
305 static void raid5_build_block(struct stripe_head *sh, int i, int previous);
307 struct stripe_head *sh);
309 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) in init_stripe() argument
311 struct r5conf *conf = sh->raid_conf; in init_stripe()
314 BUG_ON(atomic_read(&sh->count) != 0); in init_stripe()
315 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); in init_stripe()
316 BUG_ON(stripe_operations_active(sh)); in init_stripe()
319 (unsigned long long)sh->sector); in init_stripe()
321 remove_hash(sh); in init_stripe()
323 sh->generation = conf->generation - previous; in init_stripe()
324 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; in init_stripe()
325 sh->sector = sector; in init_stripe()
326 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
327 sh->state = 0; in init_stripe()
330 for (i = sh->disks; i--; ) { in init_stripe()
331 struct r5dev *dev = &sh->dev[i]; in init_stripe()
336 (unsigned long long)sh->sector, i, dev->toread, in init_stripe()
342 raid5_build_block(sh, i, previous); in init_stripe()
344 insert_hash(conf, sh); in init_stripe()
350 struct stripe_head *sh; in __find_stripe() local
354 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) in __find_stripe()
355 if (sh->sector == sector && sh->generation == generation) in __find_stripe()
356 return sh; in __find_stripe()
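__find_stripe() walks the hash bucket for the sector, matching on both sector and generation so that stripes from before and after a reshape stay distinct. A sketch (the three-argument hlist_for_each_entry() matches pre-3.9 kernels):

static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
                                         short generation)
{
        struct stripe_head *sh;
        struct hlist_node *hn;

        pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
        hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
                if (sh->sector == sector && sh->generation == generation)
                        return sh;
        pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
        return NULL;
}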
447 struct stripe_head *sh; in get_active_stripe() local
457 sh = __find_stripe(conf, sector, conf->generation - previous); in get_active_stripe()
458 if (!sh) { in get_active_stripe()
460 sh = get_free_stripe(conf); in get_active_stripe()
461 if (noblock && sh == NULL) in get_active_stripe()
463 if (!sh) { in get_active_stripe()
474 init_stripe(sh, sector, previous); in get_active_stripe()
476 if (atomic_read(&sh->count)) { in get_active_stripe()
477 BUG_ON(!list_empty(&sh->lru) in get_active_stripe()
478 && !test_bit(STRIPE_EXPANDING, &sh->state)); in get_active_stripe()
480 if (!test_bit(STRIPE_HANDLE, &sh->state)) in get_active_stripe()
482 if (list_empty(&sh->lru) && in get_active_stripe()
483 !test_bit(STRIPE_EXPANDING, &sh->state)) in get_active_stripe()
485 list_del_init(&sh->lru); in get_active_stripe()
488 } while (sh == NULL); in get_active_stripe()
490 if (sh) in get_active_stripe()
491 atomic_inc(&sh->count); in get_active_stripe()
494 return sh; in get_active_stripe()
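get_active_stripe() is the lookup-or-allocate loop at the heart of the stripe cache: find the stripe in the hash, otherwise initialise a free one, otherwise sleep until somebody releases one. A condensed sketch of the control flow, with the quiesce and wait_event details deliberately elided:

static struct stripe_head *
get_active_stripe(struct r5conf *conf, sector_t sector,
                  int previous, int noblock, int noquiesce)
{
        struct stripe_head *sh;

        spin_lock_irq(&conf->device_lock);
        do {
                /* (waits here while the array is quiesced, unless noquiesce) */
                sh = __find_stripe(conf, sector, conf->generation - previous);
                if (!sh) {
                        sh = get_free_stripe(conf);
                        if (noblock && sh == NULL)
                                break;          /* caller will retry later */
                        if (!sh) {
                                /* cache exhausted: block on wait_for_stripe
                                 * until stripes are released, then loop */
                        } else
                                init_stripe(sh, sector, previous);
                } else if (atomic_read(&sh->count)) {
                        BUG_ON(!list_empty(&sh->lru)
                            && !test_bit(STRIPE_EXPANDING, &sh->state));
                } else {
                        /* cached but idle: pull it off whichever list holds it */
                        if (!test_bit(STRIPE_HANDLE, &sh->state))
                                atomic_inc(&conf->active_stripes);
                        list_del_init(&sh->lru);
                }
        } while (sh == NULL);

        if (sh)
                atomic_inc(&sh->count);
        spin_unlock_irq(&conf->device_lock);
        return sh;
}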
502 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) in ops_run_io() argument
504 struct r5conf *conf = sh->raid_conf; in ops_run_io()
505 int i, disks = sh->disks; in ops_run_io()
514 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { in ops_run_io()
515 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) in ops_run_io()
519 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) in ops_run_io()
522 &sh->dev[i].flags)) { in ops_run_io()
528 bi = &sh->dev[i].req; in ops_run_io()
529 rbi = &sh->dev[i].rreq; /* For writing to replacement */ in ops_run_io()
554 if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev) in ops_run_io()
577 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, in ops_run_io()
611 set_bit(STRIPE_IO_STARTED, &sh->state); in ops_run_io()
615 __func__, (unsigned long long)sh->sector, in ops_run_io()
617 atomic_inc(&sh->count); in ops_run_io()
618 bi->bi_sector = sh->sector + rdev->data_offset; in ops_run_io()
626 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); in ops_run_io()
634 set_bit(STRIPE_IO_STARTED, &sh->state); in ops_run_io()
639 __func__, (unsigned long long)sh->sector, in ops_run_io()
641 atomic_inc(&sh->count); in ops_run_io()
642 rbi->bi_sector = sh->sector + rrdev->data_offset; in ops_run_io()
653 set_bit(STRIPE_DEGRADED, &sh->state); in ops_run_io()
655 bi->bi_rw, i, (unsigned long long)sh->sector); in ops_run_io()
656 clear_bit(R5_LOCKED, &sh->dev[i].flags); in ops_run_io()
657 set_bit(STRIPE_HANDLE, &sh->state); in ops_run_io()
721 struct stripe_head *sh = stripe_head_ref; in ops_complete_biofill() local
723 struct r5conf *conf = sh->raid_conf; in ops_complete_biofill()
727 (unsigned long long)sh->sector); in ops_complete_biofill()
731 for (i = sh->disks; i--; ) { in ops_complete_biofill()
732 struct r5dev *dev = &sh->dev[i]; in ops_complete_biofill()
757 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); in ops_complete_biofill()
761 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_biofill()
762 release_stripe(sh); in ops_complete_biofill()
765 static void ops_run_biofill(struct stripe_head *sh) in ops_run_biofill() argument
768 struct r5conf *conf = sh->raid_conf; in ops_run_biofill()
773 (unsigned long long)sh->sector); in ops_run_biofill()
775 for (i = sh->disks; i--; ) { in ops_run_biofill()
776 struct r5dev *dev = &sh->dev[i]; in ops_run_biofill()
792 atomic_inc(&sh->count); in ops_run_biofill()
793 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); in ops_run_biofill()
797 static void mark_target_uptodate(struct stripe_head *sh, int target) in mark_target_uptodate() argument
804 tgt = &sh->dev[target]; in mark_target_uptodate()
812 struct stripe_head *sh = stripe_head_ref; in ops_complete_compute() local
815 (unsigned long long)sh->sector); in ops_complete_compute()
818 mark_target_uptodate(sh, sh->ops.target); in ops_complete_compute()
819 mark_target_uptodate(sh, sh->ops.target2); in ops_complete_compute()
821 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); in ops_complete_compute()
822 if (sh->check_state == check_state_compute_run) in ops_complete_compute()
823 sh->check_state = check_state_compute_result; in ops_complete_compute()
824 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_compute()
825 release_stripe(sh); in ops_complete_compute()
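When an asynchronous compute finishes, ops_complete_compute() marks up to two targets uptodate (two only for RAID-6 double-failure recovery) and hands the stripe back to the state machine. Reconstructed:

static void mark_target_uptodate(struct stripe_head *sh, int target)
{
        struct r5dev *tgt;

        if (target < 0)
                return;

        tgt = &sh->dev[target];
        set_bit(R5_UPTODATE, &tgt->flags);
        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
        clear_bit(R5_Wantcompute, &tgt->flags);
}

static void ops_complete_compute(void *stripe_head_ref)
{
        struct stripe_head *sh = stripe_head_ref;

        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);

        /* mark the computed target(s) as uptodate */
        mark_target_uptodate(sh, sh->ops.target);
        mark_target_uptodate(sh, sh->ops.target2);

        clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
        if (sh->check_state == check_state_compute_run)
                sh->check_state = check_state_compute_result;
        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
}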
829 static addr_conv_t *to_addr_conv(struct stripe_head *sh, in to_addr_conv() argument
832 return percpu->scribble + sizeof(struct page *) * (sh->disks + 2); in to_addr_conv()
836 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute5() argument
838 int disks = sh->disks; in ops_run_compute5()
840 int target = sh->ops.target; in ops_run_compute5()
841 struct r5dev *tgt = &sh->dev[target]; in ops_run_compute5()
849 __func__, (unsigned long long)sh->sector, target); in ops_run_compute5()
854 xor_srcs[count++] = sh->dev[i].page; in ops_run_compute5()
856 atomic_inc(&sh->count); in ops_run_compute5()
859 ops_complete_compute, sh, to_addr_conv(sh, percpu)); in ops_run_compute5()
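ops_run_compute5() rebuilds one missing block on RAID-4/5 by XOR-ing every other device page into the target through the async_tx API. A sketch:

static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
        int disks = sh->disks;
        struct page **xor_srcs = percpu->scribble;
        int target = sh->ops.target;
        struct r5dev *tgt = &sh->dev[target];
        struct page *xor_dest = tgt->page;
        int count = 0;
        struct dma_async_tx_descriptor *tx;
        struct async_submit_ctl submit;
        int i;

        pr_debug("%s: stripe %llu block: %d\n",
                __func__, (unsigned long long)sh->sector, target);
        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

        for (i = disks; i--; )
                if (i != target)
                        xor_srcs[count++] = sh->dev[i].page;

        atomic_inc(&sh->count);

        init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
                          ops_complete_compute, sh, to_addr_conv(sh, percpu));
        if (unlikely(count == 1))
                tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
        else
                tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

        return tx;
}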
877 static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh) in set_syndrome_sources() argument
879 int disks = sh->disks; in set_syndrome_sources()
880 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); in set_syndrome_sources()
881 int d0_idx = raid6_d0(sh); in set_syndrome_sources()
891 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); in set_syndrome_sources()
893 srcs[slot] = sh->dev[i].page; in set_syndrome_sources()
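set_syndrome_sources() lays the device pages out in syndrome order for the async P/Q routines, using raid6_d0() and raid6_idx_to_slot() from earlier in this listing; it returns the number of data slots. Reconstructed:

static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
        int disks = sh->disks;
        int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
        int d0_idx = raid6_d0(sh);
        int count;
        int i;

        for (i = 0; i < disks; i++)
                srcs[i] = NULL;

        count = 0;
        i = d0_idx;
        do {
                int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

                srcs[slot] = sh->dev[i].page;
                i = raid6_next_disk(i, disks);
        } while (i != d0_idx);

        return syndrome_disks;
}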
901 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_1() argument
903 int disks = sh->disks; in ops_run_compute6_1()
906 int qd_idx = sh->qd_idx; in ops_run_compute6_1()
914 if (sh->ops.target < 0) in ops_run_compute6_1()
915 target = sh->ops.target2; in ops_run_compute6_1()
916 else if (sh->ops.target2 < 0) in ops_run_compute6_1()
917 target = sh->ops.target; in ops_run_compute6_1()
923 __func__, (unsigned long long)sh->sector, target); in ops_run_compute6_1()
925 tgt = &sh->dev[target]; in ops_run_compute6_1()
929 atomic_inc(&sh->count); in ops_run_compute6_1()
932 count = set_syndrome_sources(blocks, sh); in ops_run_compute6_1()
936 ops_complete_compute, sh, in ops_run_compute6_1()
937 to_addr_conv(sh, percpu)); in ops_run_compute6_1()
945 blocks[count++] = sh->dev[i].page; in ops_run_compute6_1()
949 NULL, ops_complete_compute, sh, in ops_run_compute6_1()
950 to_addr_conv(sh, percpu)); in ops_run_compute6_1()
958 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_2() argument
960 int i, count, disks = sh->disks; in ops_run_compute6_2()
961 int syndrome_disks = sh->ddf_layout ? disks : disks-2; in ops_run_compute6_2()
962 int d0_idx = raid6_d0(sh); in ops_run_compute6_2()
964 int target = sh->ops.target; in ops_run_compute6_2()
965 int target2 = sh->ops.target2; in ops_run_compute6_2()
966 struct r5dev *tgt = &sh->dev[target]; in ops_run_compute6_2()
967 struct r5dev *tgt2 = &sh->dev[target2]; in ops_run_compute6_2()
973 __func__, (unsigned long long)sh->sector, target, target2); in ops_run_compute6_2()
986 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); in ops_run_compute6_2()
988 blocks[slot] = sh->dev[i].page; in ops_run_compute6_2()
1001 __func__, (unsigned long long)sh->sector, faila, failb); in ops_run_compute6_2()
1003 atomic_inc(&sh->count); in ops_run_compute6_2()
1010 ops_complete_compute, sh, in ops_run_compute6_2()
1011 to_addr_conv(sh, percpu)); in ops_run_compute6_2()
1017 int qd_idx = sh->qd_idx; in ops_run_compute6_2()
1029 blocks[count++] = sh->dev[i].page; in ops_run_compute6_2()
1031 dest = sh->dev[data_target].page; in ops_run_compute6_2()
1035 to_addr_conv(sh, percpu)); in ops_run_compute6_2()
1039 count = set_syndrome_sources(blocks, sh); in ops_run_compute6_2()
1041 ops_complete_compute, sh, in ops_run_compute6_2()
1042 to_addr_conv(sh, percpu)); in ops_run_compute6_2()
1048 ops_complete_compute, sh, in ops_run_compute6_2()
1049 to_addr_conv(sh, percpu)); in ops_run_compute6_2()
1067 struct stripe_head *sh = stripe_head_ref; in ops_complete_prexor() local
1070 (unsigned long long)sh->sector); in ops_complete_prexor()
1074 ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor() argument
1077 int disks = sh->disks; in ops_run_prexor()
1079 int count = 0, pd_idx = sh->pd_idx, i; in ops_run_prexor()
1083 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_prexor()
1086 (unsigned long long)sh->sector); in ops_run_prexor()
1089 struct r5dev *dev = &sh->dev[i]; in ops_run_prexor()
1096 ops_complete_prexor, sh, to_addr_conv(sh, percpu)); in ops_run_prexor()
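For a read-modify-write, ops_run_prexor() XORs the soon-to-be-drained blocks out of the old parity; ASYNC_TX_XOR_DROP_DST makes the parity page itself the first source rather than zeroing it. A sketch:

static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
               struct dma_async_tx_descriptor *tx)
{
        int disks = sh->disks;
        struct page **xor_srcs = percpu->scribble;
        int count = 0, pd_idx = sh->pd_idx, i;
        struct async_submit_ctl submit;

        /* existing parity data is XORed out of the result */
        struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

        pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);

        for (i = disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];
                /* only blocks that are about to be overwritten */
                if (test_bit(R5_Wantdrain, &dev->flags))
                        xor_srcs[count++] = dev->page;
        }

        init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
                          ops_complete_prexor, sh, to_addr_conv(sh, percpu));
        tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

        return tx;
}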
1103 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) in ops_run_biodrain() argument
1105 int disks = sh->disks; in ops_run_biodrain()
1109 (unsigned long long)sh->sector); in ops_run_biodrain()
1112 struct r5dev *dev = &sh->dev[i]; in ops_run_biodrain()
1118 spin_lock_irq(&sh->raid_conf->device_lock); in ops_run_biodrain()
1123 spin_unlock_irq(&sh->raid_conf->device_lock); in ops_run_biodrain()
1141 struct stripe_head *sh = stripe_head_ref; in ops_complete_reconstruct() local
1142 int disks = sh->disks; in ops_complete_reconstruct()
1143 int pd_idx = sh->pd_idx; in ops_complete_reconstruct()
1144 int qd_idx = sh->qd_idx; in ops_complete_reconstruct()
1149 (unsigned long long)sh->sector); in ops_complete_reconstruct()
1152 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); in ops_complete_reconstruct()
1155 struct r5dev *dev = &sh->dev[i]; in ops_complete_reconstruct()
1164 if (sh->reconstruct_state == reconstruct_state_drain_run) in ops_complete_reconstruct()
1165 sh->reconstruct_state = reconstruct_state_drain_result; in ops_complete_reconstruct()
1166 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) in ops_complete_reconstruct()
1167 sh->reconstruct_state = reconstruct_state_prexor_drain_result; in ops_complete_reconstruct()
1169 BUG_ON(sh->reconstruct_state != reconstruct_state_run); in ops_complete_reconstruct()
1170 sh->reconstruct_state = reconstruct_state_result; in ops_complete_reconstruct()
1173 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_reconstruct()
1174 release_stripe(sh); in ops_complete_reconstruct()
1178 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct5() argument
1181 int disks = sh->disks; in ops_run_reconstruct5()
1184 int count = 0, pd_idx = sh->pd_idx, i; in ops_run_reconstruct5()
1190 (unsigned long long)sh->sector); in ops_run_reconstruct5()
1195 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { in ops_run_reconstruct5()
1197 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_reconstruct5()
1199 struct r5dev *dev = &sh->dev[i]; in ops_run_reconstruct5()
1204 xor_dest = sh->dev[pd_idx].page; in ops_run_reconstruct5()
1206 struct r5dev *dev = &sh->dev[i]; in ops_run_reconstruct5()
1220 atomic_inc(&sh->count); in ops_run_reconstruct5()
1222 init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh, in ops_run_reconstruct5()
1223 to_addr_conv(sh, percpu)); in ops_run_reconstruct5()
1231 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct6() argument
1238 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); in ops_run_reconstruct6()
1240 count = set_syndrome_sources(blocks, sh); in ops_run_reconstruct6()
1242 atomic_inc(&sh->count); in ops_run_reconstruct6()
1245 sh, to_addr_conv(sh, percpu)); in ops_run_reconstruct6()
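The RAID-6 path regenerates P and Q in a single pass with async_gen_syndrome() over the sources prepared by set_syndrome_sources(); count+2 covers the data slots plus both parity slots. Reconstructed:

static void
ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
                     struct dma_async_tx_descriptor *tx)
{
        struct async_submit_ctl submit;
        struct page **blocks = percpu->scribble;
        int count;

        pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

        count = set_syndrome_sources(blocks, sh);

        atomic_inc(&sh->count);

        init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
                          sh, to_addr_conv(sh, percpu));
        async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
}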
1251 struct stripe_head *sh = stripe_head_ref; in ops_complete_check() local
1254 (unsigned long long)sh->sector); in ops_complete_check()
1256 sh->check_state = check_state_check_result; in ops_complete_check()
1257 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_check()
1258 release_stripe(sh); in ops_complete_check()
1261 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_check_p() argument
1263 int disks = sh->disks; in ops_run_check_p()
1264 int pd_idx = sh->pd_idx; in ops_run_check_p()
1265 int qd_idx = sh->qd_idx; in ops_run_check_p()
1274 (unsigned long long)sh->sector); in ops_run_check_p()
1277 xor_dest = sh->dev[pd_idx].page; in ops_run_check_p()
1282 xor_srcs[count++] = sh->dev[i].page; in ops_run_check_p()
1286 to_addr_conv(sh, percpu)); in ops_run_check_p()
1288 &sh->ops.zero_sum_result, &submit); in ops_run_check_p()
1290 atomic_inc(&sh->count); in ops_run_check_p()
1291 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); in ops_run_check_p()
1295 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) in ops_run_check_pq() argument
1302 (unsigned long long)sh->sector, checkp); in ops_run_check_pq()
1304 count = set_syndrome_sources(srcs, sh); in ops_run_check_pq()
1308 atomic_inc(&sh->count); in ops_run_check_pq()
1310 sh, to_addr_conv(sh, percpu)); in ops_run_check_pq()
1312 &sh->ops.zero_sum_result, percpu->spare_page, &submit); in ops_run_check_pq()
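ops_run_check_pq() verifies parity without rewriting it: async_syndrome_val() recomputes P and Q (using percpu->spare_page as scratch) and flags mismatches in sh->ops.zero_sum_result; checkp == 0 drops P from the comparison by NULL-ing its slot. A sketch:

static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
{
        struct page **srcs = percpu->scribble;
        struct async_submit_ctl submit;
        int count;

        pr_debug("%s: stripe %llu checkp: %d\n", __func__,
                (unsigned long long)sh->sector, checkp);

        count = set_syndrome_sources(srcs, sh);
        if (!checkp)
                srcs[count] = NULL;

        atomic_inc(&sh->count);
        init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
                          sh, to_addr_conv(sh, percpu));
        async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
                           &sh->ops.zero_sum_result, percpu->spare_page, &submit);
}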
1315 static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request) in __raid_run_ops() argument
1317 int overlap_clear = 0, i, disks = sh->disks; in __raid_run_ops()
1319 struct r5conf *conf = sh->raid_conf; in __raid_run_ops()
1327 ops_run_biofill(sh); in __raid_run_ops()
1333 tx = ops_run_compute5(sh, percpu); in __raid_run_ops()
1335 if (sh->ops.target2 < 0 || sh->ops.target < 0) in __raid_run_ops()
1336 tx = ops_run_compute6_1(sh, percpu); in __raid_run_ops()
1338 tx = ops_run_compute6_2(sh, percpu); in __raid_run_ops()
1346 tx = ops_run_prexor(sh, percpu, tx); in __raid_run_ops()
1349 tx = ops_run_biodrain(sh, tx); in __raid_run_ops()
1355 ops_run_reconstruct5(sh, percpu, tx); in __raid_run_ops()
1357 ops_run_reconstruct6(sh, percpu, tx); in __raid_run_ops()
1361 if (sh->check_state == check_state_run) in __raid_run_ops()
1362 ops_run_check_p(sh, percpu); in __raid_run_ops()
1363 else if (sh->check_state == check_state_run_q) in __raid_run_ops()
1364 ops_run_check_pq(sh, percpu, 0); in __raid_run_ops()
1365 else if (sh->check_state == check_state_run_pq) in __raid_run_ops()
1366 ops_run_check_pq(sh, percpu, 1); in __raid_run_ops()
1373 struct r5dev *dev = &sh->dev[i]; in __raid_run_ops()
1375 wake_up(&sh->raid_conf->wait_for_overlap); in __raid_run_ops()
1383 struct stripe_head *sh = param; in async_run_ops() local
1384 unsigned long ops_request = sh->ops.request; in async_run_ops()
1386 clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state); in async_run_ops()
1387 wake_up(&sh->ops.wait_for_ops); in async_run_ops()
1389 __raid_run_ops(sh, ops_request); in async_run_ops()
1390 release_stripe(sh); in async_run_ops()
1393 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) in raid_run_ops() argument
1399 wait_event(sh->ops.wait_for_ops, in raid_run_ops()
1400 !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state)); in raid_run_ops()
1401 sh->ops.request = ops_request; in raid_run_ops()
1403 atomic_inc(&sh->count); in raid_run_ops()
1404 async_schedule(async_run_ops, sh); in raid_run_ops()
1412 struct stripe_head *sh; in grow_one_stripe() local
1413 sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL); in grow_one_stripe()
1414 if (!sh) in grow_one_stripe()
1417 sh->raid_conf = conf; in grow_one_stripe()
1419 init_waitqueue_head(&sh->ops.wait_for_ops); in grow_one_stripe()
1422 if (grow_buffers(sh)) { in grow_one_stripe()
1423 shrink_buffers(sh); in grow_one_stripe()
1424 kmem_cache_free(conf->slab_cache, sh); in grow_one_stripe()
1428 atomic_set(&sh->count, 1); in grow_one_stripe()
1430 INIT_LIST_HEAD(&sh->lru); in grow_one_stripe()
1431 release_stripe(sh); in grow_one_stripe()
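grow_one_stripe() allocates a stripe_head from the slab cache, attaches its pages, and seeds the cache by releasing it with a reference count of 1, so release_stripe() files it on the inactive list. Reconstructed (in this era the wait_for_ops init sat under CONFIG_MULTICORE_RAID456):

static int grow_one_stripe(struct r5conf *conf)
{
        struct stripe_head *sh;

        sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
        if (!sh)
                return 0;

        sh->raid_conf = conf;
#ifdef CONFIG_MULTICORE_RAID456
        init_waitqueue_head(&sh->ops.wait_for_ops);
#endif

        if (grow_buffers(sh)) {
                shrink_buffers(sh);
                kmem_cache_free(conf->slab_cache, sh);
                return 0;
        }
        /* we just created an active stripe, so... */
        atomic_set(&sh->count, 1);
        atomic_inc(&conf->active_stripes);
        INIT_LIST_HEAD(&sh->lru);
        release_stripe(sh);
        return 1;
}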
1631 struct stripe_head *sh; in drop_one_stripe() local
1634 sh = get_free_stripe(conf); in drop_one_stripe()
1636 if (!sh) in drop_one_stripe()
1638 BUG_ON(atomic_read(&sh->count)); in drop_one_stripe()
1639 shrink_buffers(sh); in drop_one_stripe()
1640 kmem_cache_free(conf->slab_cache, sh); in drop_one_stripe()
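drop_one_stripe() is the inverse, used when shrinking the cache; only fully idle stripes may be taken, hence the BUG_ON on the reference count. A sketch:

static int drop_one_stripe(struct r5conf *conf)
{
        struct stripe_head *sh;

        spin_lock_irq(&conf->device_lock);
        sh = get_free_stripe(conf);
        spin_unlock_irq(&conf->device_lock);
        if (!sh)
                return 0;
        BUG_ON(atomic_read(&sh->count));
        shrink_buffers(sh);
        kmem_cache_free(conf->slab_cache, sh);
        atomic_dec(&conf->active_stripes);
        return 1;
}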
1657 struct stripe_head *sh = bi->bi_private; in raid5_end_read_request() local
1658 struct r5conf *conf = sh->raid_conf; in raid5_end_read_request()
1659 int disks = sh->disks, i; in raid5_end_read_request()
1666 if (bi == &sh->dev[i].req) in raid5_end_read_request()
1670 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_read_request()
1676 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) in raid5_end_read_request()
1687 set_bit(R5_UPTODATE, &sh->dev[i].flags); in raid5_end_read_request()
1688 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { in raid5_end_read_request()
1698 (unsigned long long)(sh->sector in raid5_end_read_request()
1702 clear_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
1703 clear_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_read_request()
1711 clear_bit(R5_UPTODATE, &sh->dev[i].flags); in raid5_end_read_request()
1713 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) in raid5_end_read_request()
1719 (unsigned long long)(sh->sector in raid5_end_read_request()
1728 (unsigned long long)(sh->sector in raid5_end_read_request()
1731 else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) in raid5_end_read_request()
1738 (unsigned long long)(sh->sector in raid5_end_read_request()
1749 set_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
1751 clear_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
1752 clear_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_read_request()
1757 clear_bit(R5_LOCKED, &sh->dev[i].flags); in raid5_end_read_request()
1758 set_bit(STRIPE_HANDLE, &sh->state); in raid5_end_read_request()
1759 release_stripe(sh); in raid5_end_read_request()
1764 struct stripe_head *sh = bi->bi_private; in raid5_end_write_request() local
1765 struct r5conf *conf = sh->raid_conf; in raid5_end_write_request()
1766 int disks = sh->disks, i; in raid5_end_write_request()
1774 if (bi == &sh->dev[i].req) { in raid5_end_write_request()
1778 if (bi == &sh->dev[i].rreq) { in raid5_end_write_request()
1792 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_write_request()
1802 else if (is_badblock(rdev, sh->sector, in raid5_end_write_request()
1805 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); in raid5_end_write_request()
1808 set_bit(STRIPE_DEGRADED, &sh->state); in raid5_end_write_request()
1810 set_bit(R5_WriteError, &sh->dev[i].flags); in raid5_end_write_request()
1814 } else if (is_badblock(rdev, sh->sector, in raid5_end_write_request()
1817 set_bit(R5_MadeGood, &sh->dev[i].flags); in raid5_end_write_request()
1821 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) in raid5_end_write_request()
1822 clear_bit(R5_LOCKED, &sh->dev[i].flags); in raid5_end_write_request()
1823 set_bit(STRIPE_HANDLE, &sh->state); in raid5_end_write_request()
1824 release_stripe(sh); in raid5_end_write_request()
1827 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
1829 static void raid5_build_block(struct stripe_head *sh, int i, int previous) in raid5_build_block() argument
1831 struct r5dev *dev = &sh->dev[i]; in raid5_build_block()
1837 dev->req.bi_private = sh; in raid5_build_block()
1844 dev->rreq.bi_private = sh; in raid5_build_block()
1848 dev->sector = compute_blocknr(sh, i, previous); in raid5_build_block()
1882 struct stripe_head *sh) in raid5_compute_sector() argument
2070 if (sh) { in raid5_compute_sector()
2071 sh->pd_idx = pd_idx; in raid5_compute_sector()
2072 sh->qd_idx = qd_idx; in raid5_compute_sector()
2073 sh->ddf_layout = ddf_layout; in raid5_compute_sector()
2083 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) in compute_blocknr() argument
2085 struct r5conf *conf = sh->raid_conf; in compute_blocknr()
2086 int raid_disks = sh->disks; in compute_blocknr()
2088 sector_t new_sector = sh->sector, check; in compute_blocknr()
2104 if (i == sh->pd_idx) in compute_blocknr()
2112 if (i > sh->pd_idx) in compute_blocknr()
2117 if (i < sh->pd_idx) in compute_blocknr()
2119 i -= (sh->pd_idx + 1); in compute_blocknr()
2131 if (i == sh->qd_idx) in compute_blocknr()
2138 if (sh->pd_idx == raid_disks-1) in compute_blocknr()
2140 else if (i > sh->pd_idx) in compute_blocknr()
2145 if (sh->pd_idx == raid_disks-1) in compute_blocknr()
2149 if (i < sh->pd_idx) in compute_blocknr()
2151 i -= (sh->pd_idx + 2); in compute_blocknr()
2161 if (sh->pd_idx == 0) in compute_blocknr()
2165 if (i < sh->pd_idx) in compute_blocknr()
2167 i -= (sh->pd_idx + 1); in compute_blocknr()
2172 if (i > sh->pd_idx) in compute_blocknr()
2177 if (i < sh->pd_idx) in compute_blocknr()
2179 i -= (sh->pd_idx + 1); in compute_blocknr()
2195 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx in compute_blocknr()
2196 || sh2.qd_idx != sh->qd_idx) { in compute_blocknr()
2206 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, in schedule_reconstruction() argument
2209 int i, pd_idx = sh->pd_idx, disks = sh->disks; in schedule_reconstruction()
2210 struct r5conf *conf = sh->raid_conf; in schedule_reconstruction()
2219 sh->reconstruct_state = reconstruct_state_drain_run; in schedule_reconstruction()
2222 sh->reconstruct_state = reconstruct_state_run; in schedule_reconstruction()
2227 struct r5dev *dev = &sh->dev[i]; in schedule_reconstruction()
2238 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) in schedule_reconstruction()
2242 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || in schedule_reconstruction()
2243 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); in schedule_reconstruction()
2245 sh->reconstruct_state = reconstruct_state_prexor_drain_run; in schedule_reconstruction()
2251 struct r5dev *dev = &sh->dev[i]; in schedule_reconstruction()
2269 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); in schedule_reconstruction()
2270 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in schedule_reconstruction()
2274 int qd_idx = sh->qd_idx; in schedule_reconstruction()
2275 struct r5dev *dev = &sh->dev[qd_idx]; in schedule_reconstruction()
2283 __func__, (unsigned long long)sh->sector, in schedule_reconstruction()
2292 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) in add_stripe_bio() argument
2295 struct r5conf *conf = sh->raid_conf; in add_stripe_bio()
2300 (unsigned long long)sh->sector); in add_stripe_bio()
2305 bip = &sh->dev[dd_idx].towrite; in add_stripe_bio()
2306 if (*bip == NULL && sh->dev[dd_idx].written == NULL) in add_stripe_bio()
2309 bip = &sh->dev[dd_idx].toread; in add_stripe_bio()
2326 sector_t sector = sh->dev[dd_idx].sector; in add_stripe_bio()
2327 for (bi=sh->dev[dd_idx].towrite; in add_stripe_bio()
2328 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && in add_stripe_bio()
2330 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { in add_stripe_bio()
2334 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) in add_stripe_bio()
2335 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); in add_stripe_bio()
2341 (unsigned long long)sh->sector, dd_idx); in add_stripe_bio()
2344 bitmap_startwrite(conf->mddev->bitmap, sh->sector, in add_stripe_bio()
2346 sh->bm_seq = conf->seq_flush+1; in add_stripe_bio()
2347 set_bit(STRIPE_BIT_DELAY, &sh->state); in add_stripe_bio()
2352 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); in add_stripe_bio()
2360 struct stripe_head *sh) in stripe_set_idx() argument
2372 &dd_idx, sh); in stripe_set_idx()
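stripe_set_idx() fills in pd_idx/qd_idx for a stripe by converting its stripe number back to a representative array sector and running that through raid5_compute_sector(). Reconstructed:

static void
stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
               struct stripe_head *sh)
{
        int sectors_per_chunk =
                previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
        int dd_idx;
        int chunk_offset = sector_div(stripe, sectors_per_chunk);
        int disks = previous ? conf->previous_raid_disks : conf->raid_disks;

        raid5_compute_sector(conf,
                             stripe * (disks - conf->max_degraded)
                             * sectors_per_chunk + chunk_offset,
                             previous,
                             &dd_idx, sh);
}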
2376 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, in handle_failed_stripe() argument
2385 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { in handle_failed_stripe()
2397 sh->sector, in handle_failed_stripe()
2405 bi = sh->dev[i].towrite; in handle_failed_stripe()
2406 sh->dev[i].towrite = NULL; in handle_failed_stripe()
2412 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in handle_failed_stripe()
2416 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
2417 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
2427 bi = sh->dev[i].written; in handle_failed_stripe()
2428 sh->dev[i].written = NULL; in handle_failed_stripe()
2431 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
2432 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
2445 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && in handle_failed_stripe()
2446 (!test_bit(R5_Insync, &sh->dev[i].flags) || in handle_failed_stripe()
2447 test_bit(R5_ReadError, &sh->dev[i].flags))) { in handle_failed_stripe()
2448 bi = sh->dev[i].toread; in handle_failed_stripe()
2449 sh->dev[i].toread = NULL; in handle_failed_stripe()
2450 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in handle_failed_stripe()
2454 sh->dev[i].sector + STRIPE_SECTORS) { in handle_failed_stripe()
2456 r5_next_bio(bi, sh->dev[i].sector); in handle_failed_stripe()
2467 bitmap_endwrite(conf->mddev->bitmap, sh->sector, in handle_failed_stripe()
2472 clear_bit(R5_LOCKED, &sh->dev[i].flags); in handle_failed_stripe()
2475 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) in handle_failed_stripe()
2481 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, in handle_failed_sync() argument
2487 clear_bit(STRIPE_SYNCING, &sh->state); in handle_failed_sync()
2506 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
2513 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
2524 static int want_replace(struct stripe_head *sh, int disk_idx) in want_replace() argument
2529 rdev = sh->raid_conf->disks[disk_idx].replacement; in want_replace()
2533 && (rdev->recovery_offset <= sh->sector in want_replace()
2534 || rdev->mddev->recovery_cp <= sh->sector)) in want_replace()
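want_replace() decides whether a stripe's block still needs to be copied toward a replacement device that has not yet been recovered past this sector. Reconstructed:

static int want_replace(struct stripe_head *sh, int disk_idx)
{
        struct md_rdev *rdev;
        int rv = 0;
        /* doing recovery, so rcu locking is not required */
        rdev = sh->raid_conf->disks[disk_idx].replacement;
        if (rdev
            && !test_bit(Faulty, &rdev->flags)
            && !test_bit(In_sync, &rdev->flags)
            && (rdev->recovery_offset <= sh->sector
                || rdev->mddev->recovery_cp <= sh->sector))
                rv = 1;

        return rv;
}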
2546 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, in fetch_block() argument
2549 struct r5dev *dev = &sh->dev[disk_idx]; in fetch_block()
2550 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], in fetch_block()
2551 &sh->dev[s->failed_num[1]] }; in fetch_block()
2559 (s->replacing && want_replace(sh, disk_idx)) || in fetch_block()
2562 (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite && in fetch_block()
2564 (sh->raid_conf->level == 6 && s->failed && s->to_write))) { in fetch_block()
2577 (unsigned long long)sh->sector, disk_idx); in fetch_block()
2578 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in fetch_block()
2581 sh->ops.target = disk_idx; in fetch_block()
2582 sh->ops.target2 = -1; /* no 2nd target */ in fetch_block()
2601 &sh->dev[other].flags)) in fetch_block()
2606 (unsigned long long)sh->sector, in fetch_block()
2608 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in fetch_block()
2610 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); in fetch_block()
2611 set_bit(R5_Wantcompute, &sh->dev[other].flags); in fetch_block()
2612 sh->ops.target = disk_idx; in fetch_block()
2613 sh->ops.target2 = other; in fetch_block()
2632 static void handle_stripe_fill(struct stripe_head *sh, in handle_stripe_fill() argument
2642 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && in handle_stripe_fill()
2643 !sh->reconstruct_state) in handle_stripe_fill()
2645 if (fetch_block(sh, s, i, disks)) in handle_stripe_fill()
2647 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_fill()
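handle_stripe_fill() just loops fetch_block() over every device, but only when no compute, check, or reconstruct is in flight, since those change the stripe contents underneath it. Reconstructed:

static void handle_stripe_fill(struct stripe_head *sh,
                               struct stripe_head_state *s,
                               int disks)
{
        int i;

        /* look for blocks to read or compute; skip this if a compute
         * is already in flight, or if the stripe contents are in the
         * midst of changing due to a write
         */
        if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
            !sh->reconstruct_state)
                for (i = disks; i--; )
                        if (fetch_block(sh, s, i, disks))
                                break;
        set_bit(STRIPE_HANDLE, &sh->state);
}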
2657 struct stripe_head *sh, int disks, struct bio **return_bi) in handle_stripe_clean_event() argument
2663 if (sh->dev[i].written) { in handle_stripe_clean_event()
2664 dev = &sh->dev[i]; in handle_stripe_clean_event()
2689 sh->sector, in handle_stripe_clean_event()
2691 !test_bit(STRIPE_DEGRADED, &sh->state), in handle_stripe_clean_event()
2696 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) in handle_stripe_clean_event()
2702 struct stripe_head *sh, in handle_stripe_dirtying() argument
2715 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
2716 if ((dev->towrite || i == sh->pd_idx) && in handle_stripe_dirtying()
2726 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && in handle_stripe_dirtying()
2736 (unsigned long long)sh->sector, rmw, rcw); in handle_stripe_dirtying()
2737 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
2741 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
2742 if ((dev->towrite || i == sh->pd_idx) && in handle_stripe_dirtying()
2748 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { in handle_stripe_dirtying()
2755 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
2756 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
2764 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
2766 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
2774 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { in handle_stripe_dirtying()
2781 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
2782 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
2797 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && in handle_stripe_dirtying()
2799 !test_bit(STRIPE_BIT_DELAY, &sh->state))) in handle_stripe_dirtying()
2800 schedule_reconstruction(sh, s, rcw == 0, 0); in handle_stripe_dirtying()
2803 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks5() argument
2808 set_bit(STRIPE_HANDLE, &sh->state); in handle_parity_checks5()
2810 switch (sh->check_state) { in handle_parity_checks5()
2815 sh->check_state = check_state_run; in handle_parity_checks5()
2817 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
2821 dev = &sh->dev[s->failed_num[0]]; in handle_parity_checks5()
2824 sh->check_state = check_state_idle; in handle_parity_checks5()
2826 dev = &sh->dev[sh->pd_idx]; in handle_parity_checks5()
2829 if (test_bit(STRIPE_INSYNC, &sh->state)) in handle_parity_checks5()
2840 clear_bit(STRIPE_DEGRADED, &sh->state); in handle_parity_checks5()
2841 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
2846 sh->check_state = check_state_idle; in handle_parity_checks5()
2858 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) in handle_parity_checks5()
2862 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
2867 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
2869 sh->check_state = check_state_compute_run; in handle_parity_checks5()
2870 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in handle_parity_checks5()
2873 &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
2874 sh->ops.target = sh->pd_idx; in handle_parity_checks5()
2875 sh->ops.target2 = -1; in handle_parity_checks5()
2884 __func__, sh->check_state, in handle_parity_checks5()
2885 (unsigned long long) sh->sector); in handle_parity_checks5()
2891 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks6() argument
2895 int pd_idx = sh->pd_idx; in handle_parity_checks6()
2896 int qd_idx = sh->qd_idx; in handle_parity_checks6()
2899 set_bit(STRIPE_HANDLE, &sh->state); in handle_parity_checks6()
2909 switch (sh->check_state) { in handle_parity_checks6()
2917 sh->check_state = check_state_run; in handle_parity_checks6()
2923 if (sh->check_state == check_state_run) in handle_parity_checks6()
2924 sh->check_state = check_state_run_pq; in handle_parity_checks6()
2926 sh->check_state = check_state_run_q; in handle_parity_checks6()
2930 sh->ops.zero_sum_result = 0; in handle_parity_checks6()
2932 if (sh->check_state == check_state_run) { in handle_parity_checks6()
2934 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in handle_parity_checks6()
2937 if (sh->check_state >= check_state_run && in handle_parity_checks6()
2938 sh->check_state <= check_state_run_pq) { in handle_parity_checks6()
2950 sh->check_state = check_state_idle; in handle_parity_checks6()
2953 if (test_bit(STRIPE_INSYNC, &sh->state)) in handle_parity_checks6()
2961 dev = &sh->dev[s->failed_num[1]]; in handle_parity_checks6()
2967 dev = &sh->dev[s->failed_num[0]]; in handle_parity_checks6()
2972 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { in handle_parity_checks6()
2973 dev = &sh->dev[pd_idx]; in handle_parity_checks6()
2978 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { in handle_parity_checks6()
2979 dev = &sh->dev[qd_idx]; in handle_parity_checks6()
2984 clear_bit(STRIPE_DEGRADED, &sh->state); in handle_parity_checks6()
2986 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
2993 sh->check_state = check_state_idle; in handle_parity_checks6()
2999 if (sh->ops.zero_sum_result == 0) { in handle_parity_checks6()
3002 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
3008 sh->check_state = check_state_compute_result; in handle_parity_checks6()
3019 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
3021 int *target = &sh->ops.target; in handle_parity_checks6()
3023 sh->ops.target = -1; in handle_parity_checks6()
3024 sh->ops.target2 = -1; in handle_parity_checks6()
3025 sh->check_state = check_state_compute_run; in handle_parity_checks6()
3026 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in handle_parity_checks6()
3028 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { in handle_parity_checks6()
3030 &sh->dev[pd_idx].flags); in handle_parity_checks6()
3032 target = &sh->ops.target2; in handle_parity_checks6()
3035 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { in handle_parity_checks6()
3037 &sh->dev[qd_idx].flags); in handle_parity_checks6()
3048 __func__, sh->check_state, in handle_parity_checks6()
3049 (unsigned long long) sh->sector); in handle_parity_checks6()
3054 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) in handle_stripe_expansion() argument
3062 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); in handle_stripe_expansion()
3063 for (i = 0; i < sh->disks; i++) in handle_stripe_expansion()
3064 if (i != sh->pd_idx && i != sh->qd_idx) { in handle_stripe_expansion()
3069 sector_t bn = compute_blocknr(sh, i, 1); in handle_stripe_expansion()
3089 sh->dev[i].page, 0, 0, STRIPE_SIZE, in handle_stripe_expansion()
3127 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) in analyse_stripe() argument
3129 struct r5conf *conf = sh->raid_conf; in analyse_stripe()
3130 int disks = sh->disks; in analyse_stripe()
3137 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); in analyse_stripe()
3138 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); in analyse_stripe()
3151 dev = &sh->dev[i]; in analyse_stripe()
3162 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) in analyse_stripe()
3191 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && in analyse_stripe()
3192 !is_badblock(rdev, sh->sector, STRIPE_SECTORS, in analyse_stripe()
3204 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, in analyse_stripe()
3231 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) in analyse_stripe()
3291 if (test_bit(STRIPE_SYNCING, &sh->state)) { in analyse_stripe()
3301 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
3310 static void handle_stripe(struct stripe_head *sh) in handle_stripe() argument
3313 struct r5conf *conf = sh->raid_conf; in handle_stripe()
3316 int disks = sh->disks; in handle_stripe()
3319 clear_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
3320 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { in handle_stripe()
3323 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
3327 if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { in handle_stripe()
3328 set_bit(STRIPE_SYNCING, &sh->state); in handle_stripe()
3329 clear_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
3330 clear_bit(STRIPE_REPLACED, &sh->state); in handle_stripe()
3332 clear_bit(STRIPE_DELAYED, &sh->state); in handle_stripe()
3336 (unsigned long long)sh->sector, sh->state, in handle_stripe()
3337 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, in handle_stripe()
3338 sh->check_state, sh->reconstruct_state); in handle_stripe()
3340 analyse_stripe(sh, &s); in handle_stripe()
3343 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
3350 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
3358 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { in handle_stripe()
3360 set_bit(STRIPE_BIOFILL_RUN, &sh->state); in handle_stripe()
3371 sh->check_state = 0; in handle_stripe()
3372 sh->reconstruct_state = 0; in handle_stripe()
3374 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); in handle_stripe()
3376 handle_failed_sync(conf, sh, &s); in handle_stripe()
3383 pdev = &sh->dev[sh->pd_idx]; in handle_stripe()
3384 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) in handle_stripe()
3385 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); in handle_stripe()
3386 qdev = &sh->dev[sh->qd_idx]; in handle_stripe()
3387 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) in handle_stripe()
3388 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) in handle_stripe()
3398 handle_stripe_clean_event(conf, sh, disks, &s.return_bi); in handle_stripe()
3409 handle_stripe_fill(sh, &s, disks); in handle_stripe()
3415 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) in handle_stripe()
3417 if (sh->reconstruct_state == reconstruct_state_drain_result || in handle_stripe()
3418 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { in handle_stripe()
3419 sh->reconstruct_state = reconstruct_state_idle; in handle_stripe()
3424 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); in handle_stripe()
3425 BUG_ON(sh->qd_idx >= 0 && in handle_stripe()
3426 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags)); in handle_stripe()
3428 struct r5dev *dev = &sh->dev[i]; in handle_stripe()
3430 (i == sh->pd_idx || i == sh->qd_idx || in handle_stripe()
3437 ((i == sh->pd_idx || i == sh->qd_idx) && in handle_stripe()
3439 set_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
3442 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in handle_stripe()
3452 if (s.to_write && !sh->reconstruct_state && !sh->check_state) in handle_stripe()
3453 handle_stripe_dirtying(conf, sh, &s, disks); in handle_stripe()
3460 if (sh->check_state || in handle_stripe()
3462 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && in handle_stripe()
3463 !test_bit(STRIPE_INSYNC, &sh->state))) { in handle_stripe()
3465 handle_parity_checks6(conf, sh, &s, disks); in handle_stripe()
3467 handle_parity_checks5(conf, sh, &s, disks); in handle_stripe()
3471 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) in handle_stripe()
3472 && !test_bit(STRIPE_REPLACED, &sh->state)) { in handle_stripe()
3475 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { in handle_stripe()
3476 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); in handle_stripe()
3477 set_bit(R5_WantReplace, &sh->dev[i].flags); in handle_stripe()
3478 set_bit(R5_LOCKED, &sh->dev[i].flags); in handle_stripe()
3482 set_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
3483 set_bit(STRIPE_REPLACED, &sh->state); in handle_stripe()
3486 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && in handle_stripe()
3487 test_bit(STRIPE_INSYNC, &sh->state)) { in handle_stripe()
3489 clear_bit(STRIPE_SYNCING, &sh->state); in handle_stripe()
3497 struct r5dev *dev = &sh->dev[s.failed_num[i]]; in handle_stripe()
3518 if (sh->reconstruct_state == reconstruct_state_result) { in handle_stripe()
3520 = get_active_stripe(conf, sh->sector, 1, 1, 1); in handle_stripe()
3525 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe()
3526 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
3536 sh->reconstruct_state = reconstruct_state_idle; in handle_stripe()
3537 clear_bit(STRIPE_EXPANDING, &sh->state); in handle_stripe()
3539 set_bit(R5_Wantwrite, &sh->dev[i].flags); in handle_stripe()
3540 set_bit(R5_LOCKED, &sh->dev[i].flags); in handle_stripe()
3545 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && in handle_stripe()
3546 !sh->reconstruct_state) { in handle_stripe()
3548 sh->disks = conf->raid_disks; in handle_stripe()
3549 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
3550 schedule_reconstruction(sh, &s, 1, 1); in handle_stripe()
3551 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { in handle_stripe()
3552 clear_bit(STRIPE_EXPAND_READY, &sh->state); in handle_stripe()
3559 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) in handle_stripe()
3560 handle_stripe_expansion(conf, sh); in handle_stripe()
3570 struct r5dev *dev = &sh->dev[i]; in handle_stripe()
3574 if (!rdev_set_badblocks(rdev, sh->sector, in handle_stripe()
3581 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
3590 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
3597 raid_run_ops(sh, s.ops_request); in handle_stripe()
3599 ops_run_io(sh, &s); in handle_stripe()
3614 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); in handle_stripe()
3622 struct stripe_head *sh; in raid5_activate_delayed() local
3623 sh = list_entry(l, struct stripe_head, lru); in raid5_activate_delayed()
3625 clear_bit(STRIPE_DELAYED, &sh->state); in raid5_activate_delayed()
3626 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in raid5_activate_delayed()
3628 list_add_tail(&sh->lru, &conf->hold_list); in raid5_activate_delayed()
3640 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); in activate_bit_delay() local
3641 list_del_init(&sh->lru); in activate_bit_delay()
3642 atomic_inc(&sh->count); in activate_bit_delay()
3643 __release_stripe(conf, sh); in activate_bit_delay()
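These two helpers put deferred stripes back into circulation: raid5_activate_delayed() promotes delayed_list entries to hold_list once preread pressure falls below IO_THRESHOLD, and activate_bit_delay() re-releases everything parked on bitmap_list. Sketches, same caveats as before:

static void raid5_activate_delayed(struct r5conf *conf)
{
        if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
                while (!list_empty(&conf->delayed_list)) {
                        struct list_head *l = conf->delayed_list.next;
                        struct stripe_head *sh;
                        sh = list_entry(l, struct stripe_head, lru);
                        list_del_init(l);
                        clear_bit(STRIPE_DELAYED, &sh->state);
                        if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                atomic_inc(&conf->preread_active_stripes);
                        list_add_tail(&sh->lru, &conf->hold_list);
                }
        }
}

static void activate_bit_delay(struct r5conf *conf)
{
        /* device_lock is held */
        struct list_head head;
        list_add(&head, &conf->bitmap_list);
        list_del_init(&conf->bitmap_list);
        while (!list_empty(&head)) {
                struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
                list_del_init(&sh->lru);
                atomic_inc(&sh->count);
                __release_stripe(conf, sh);
        }
}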
3904 struct stripe_head *sh; in __get_priority_stripe() local
3913 sh = list_entry(conf->handle_list.next, typeof(*sh), lru); in __get_priority_stripe()
3917 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { in __get_priority_stripe()
3931 sh = list_entry(conf->hold_list.next, in __get_priority_stripe()
3932 typeof(*sh), lru); in __get_priority_stripe()
3939 list_del_init(&sh->lru); in __get_priority_stripe()
3940 atomic_inc(&sh->count); in __get_priority_stripe()
3941 BUG_ON(atomic_read(&sh->count) != 1); in __get_priority_stripe()
3942 return sh; in __get_priority_stripe()
3951 struct stripe_head *sh; in make_request() local
4018 sh = get_active_stripe(conf, new_sector, previous, in make_request()
4020 if (sh) { in make_request()
4039 release_stripe(sh); in make_request()
4048 release_stripe(sh); in make_request()
4062 if (test_bit(STRIPE_EXPANDING, &sh->state) || in make_request()
4063 !add_stripe_bio(sh, bi, dd_idx, rw)) { in make_request()
4069 release_stripe(sh); in make_request()
4074 set_bit(STRIPE_HANDLE, &sh->state); in make_request()
4075 clear_bit(STRIPE_DELAYED, &sh->state); in make_request()
4077 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in make_request()
4079 release_stripe(sh); in make_request()
4117 struct stripe_head *sh; in reshape_request() local
4233 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); in reshape_request()
4234 set_bit(STRIPE_EXPANDING, &sh->state); in reshape_request()
4239 for (j=sh->disks; j--;) { in reshape_request()
4241 if (j == sh->pd_idx) in reshape_request()
4244 j == sh->qd_idx) in reshape_request()
4246 s = compute_blocknr(sh, j, 0); in reshape_request()
4251 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); in reshape_request()
4252 set_bit(R5_Expanded, &sh->dev[j].flags); in reshape_request()
4253 set_bit(R5_UPTODATE, &sh->dev[j].flags); in reshape_request()
4256 set_bit(STRIPE_EXPAND_READY, &sh->state); in reshape_request()
4257 set_bit(STRIPE_HANDLE, &sh->state); in reshape_request()
4259 list_add(&sh->lru, &stripes); in reshape_request()
4282 sh = get_active_stripe(conf, first_sector, 1, 0, 1); in reshape_request()
4283 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); in reshape_request()
4284 set_bit(STRIPE_HANDLE, &sh->state); in reshape_request()
4285 release_stripe(sh); in reshape_request()
4292 sh = list_entry(stripes.next, struct stripe_head, lru); in reshape_request()
4293 list_del_init(&sh->lru); in reshape_request()
4294 release_stripe(sh); in reshape_request()
4326 struct stripe_head *sh; in sync_request() local
4383 sh = get_active_stripe(conf, sector_nr, 0, 1, 0); in sync_request()
4384 if (sh == NULL) { in sync_request()
4385 sh = get_active_stripe(conf, sector_nr, 0, 0, 0); in sync_request()
4401 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); in sync_request()
4403 handle_stripe(sh); in sync_request()
4404 release_stripe(sh); in sync_request()
4421 struct stripe_head *sh; in retry_aligned_read() local
4442 sh = get_active_stripe(conf, sector, 0, 1, 0); in retry_aligned_read()
4444 if (!sh) { in retry_aligned_read()
4451 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { in retry_aligned_read()
4452 release_stripe(sh); in retry_aligned_read()
4458 handle_stripe(sh); in retry_aligned_read()
4459 release_stripe(sh); in retry_aligned_read()
4482 struct stripe_head *sh; in raid5d() local
4520 sh = __get_priority_stripe(conf); in raid5d()
4522 if (!sh) in raid5d()
4527 handle_stripe(sh); in raid5d()
4528 release_stripe(sh); in raid5d()