Lines Matching refs:pd_idx

All matches below are from drivers/md/raid5.c, the md RAID4/5/6 personality. pd_idx is the stripe_head field recording which device holds the P (XOR) parity block of a stripe; qd_idx, where it appears, is the RAID6 Q (syndrome) device. Short userspace sketches of the arithmetic are interleaved after each functional group of matches; they model the logic and are not the kernel code itself.

162 	if (idx == sh->pd_idx)  in raid6_idx_to_slot()
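
The single match in raid6_idx_to_slot() is part of mapping a stripe's device index onto a syndrome slot: the async RAID6 engines want the data blocks first, then P, then Q. A minimal userspace model of that mapping (the ddf_layout special case is omitted, and struct stripe_model is a hypothetical stand-in for the few stripe_head fields the sketch needs):

    #include <stdio.h>

    struct stripe_model {
        int pd_idx;     /* device index holding P */
        int qd_idx;     /* device index holding Q */
    };

    /* Data blocks take slots 0..syndrome_disks-1 in device order,
     * P takes slot syndrome_disks, Q takes syndrome_disks + 1. */
    static int idx_to_slot(int idx, const struct stripe_model *sh,
                           int *count, int syndrome_disks)
    {
        if (idx == sh->pd_idx)
            return syndrome_disks;
        if (idx == sh->qd_idx)
            return syndrome_disks + 1;
        return (*count)++;          /* next free data slot */
    }

    int main(void)
    {
        struct stripe_model sh = { .pd_idx = 2, .qd_idx = 3 };
        int disks = 6, count = 0;

        for (int i = 0; i < disks; i++)
            printf("dev %d -> slot %d\n",
                   i, idx_to_slot(i, &sh, &count, disks - 2));
        return 0;
    }
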
1079 int count = 0, pd_idx = sh->pd_idx, i; in ops_run_prexor() local
1083 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_prexor()
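
ops_run_prexor() makes the parity page both the XOR destination and the first source (line 1083), which is the read-modify-write identity: XOR the old data out of P, then XOR the new data in. A toy demonstration on single bytes:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned char d0 = 0xA5, d1 = 0x3C, d2 = 0x0F;
        unsigned char p = d0 ^ d1 ^ d2;     /* full-stripe parity */

        /* Read-modify-write of d1: prexor removes the old data
         * from P, then the new data is xor-ed back in. */
        unsigned char new_d1 = 0x77;
        p ^= d1;            /* prexor: P ^= old data       */
        p ^= new_d1;        /* reconstruct: P ^= new data  */
        d1 = new_d1;

        assert(p == (d0 ^ d1 ^ d2));        /* P is consistent again */
        printf("P = 0x%02x\n", p);
        return 0;
    }
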
1143 int pd_idx = sh->pd_idx; in ops_complete_reconstruct() local
1157 if (dev->written || i == pd_idx || i == qd_idx) { in ops_complete_reconstruct()
1184 int count = 0, pd_idx = sh->pd_idx, i; in ops_run_reconstruct5() local
1197 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_reconstruct5()
1204 xor_dest = sh->dev[pd_idx].page; in ops_run_reconstruct5()
1207 if (i != pd_idx) in ops_run_reconstruct5()
1264 int pd_idx = sh->pd_idx; in ops_run_check_p() local
1277 xor_dest = sh->dev[pd_idx].page; in ops_run_check_p()
1280 if (i == pd_idx || i == qd_idx) in ops_run_check_p()
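
ops_run_reconstruct5() computes P into the pd_idx page from every other device (lines 1204-1207), and ops_run_check_p() XORs all blocks, P included, expecting zero (lines 1277-1280). Both fit in a short model; the disk count, block size, and contents here are arbitrary:

    #include <assert.h>
    #include <string.h>

    #define NDISKS  5       /* toy array: 4 data + 1 parity */
    #define BLK     16      /* toy block size */

    int main(void)
    {
        unsigned char dev[NDISKS][BLK];
        int pd_idx = 2;

        /* Fill data blocks with arbitrary patterns. */
        for (int i = 0; i < NDISKS; i++)
            memset(dev[i], 0x11 * (i + 1), BLK);

        /* reconstruct5: P = XOR of all data blocks. */
        memset(dev[pd_idx], 0, BLK);
        for (int i = 0; i < NDISKS; i++)
            if (i != pd_idx)
                for (int b = 0; b < BLK; b++)
                    dev[pd_idx][b] ^= dev[i][b];

        /* check_p: XOR of every block, parity included, must be zero. */
        unsigned char sum[BLK] = { 0 };
        for (int i = 0; i < NDISKS; i++)
            for (int b = 0; b < BLK; b++)
                sum[b] ^= dev[i][b];
        for (int b = 0; b < BLK; b++)
            assert(sum[b] == 0);
        return 0;
    }
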
1887 int pd_idx, qd_idx; in raid5_compute_sector() local
1915 pd_idx = qd_idx = -1; in raid5_compute_sector()
1918 pd_idx = data_disks; in raid5_compute_sector()
1923 pd_idx = data_disks - sector_div(stripe2, raid_disks); in raid5_compute_sector()
1924 if (*dd_idx >= pd_idx) in raid5_compute_sector()
1928 pd_idx = sector_div(stripe2, raid_disks); in raid5_compute_sector()
1929 if (*dd_idx >= pd_idx) in raid5_compute_sector()
1933 pd_idx = data_disks - sector_div(stripe2, raid_disks); in raid5_compute_sector()
1934 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; in raid5_compute_sector()
1937 pd_idx = sector_div(stripe2, raid_disks); in raid5_compute_sector()
1938 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; in raid5_compute_sector()
1941 pd_idx = 0; in raid5_compute_sector()
1945 pd_idx = data_disks; in raid5_compute_sector()
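
The level-5 cases above rotate the parity disk per stripe: sector_div() divides the stripe number in place and returns the remainder, so pd_idx walks backwards (LEFT_*) or forwards (RIGHT_*) across the array, and dd_idx is then either bumped past the parity disk (ASYMMETRIC) or rotated to start just after it (SYMMETRIC); PARITY_0 and PARITY_N pin parity to the first or last disk. A userspace model of the four rotating layouts, using plain % in place of sector_div():

    #include <stdio.h>

    /* Model of the level-5 layout arithmetic in raid5_compute_sector():
     * stripe is the stripe number, dd the data-disk index within it. */
    static void layout5(int algo, long stripe, int dd, int raid_disks,
                        int *pd_idx, int *dd_idx)
    {
        int data_disks = raid_disks - 1;

        *dd_idx = dd;
        switch (algo) {
        case 0:     /* LEFT_ASYMMETRIC */
            *pd_idx = data_disks - stripe % raid_disks;
            if (*dd_idx >= *pd_idx)
                (*dd_idx)++;
            break;
        case 1:     /* RIGHT_ASYMMETRIC */
            *pd_idx = stripe % raid_disks;
            if (*dd_idx >= *pd_idx)
                (*dd_idx)++;
            break;
        case 2:     /* LEFT_SYMMETRIC (the md default) */
            *pd_idx = data_disks - stripe % raid_disks;
            *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
            break;
        case 3:     /* RIGHT_SYMMETRIC */
            *pd_idx = stripe % raid_disks;
            *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks;
            break;
        }
    }

    int main(void)
    {
        for (long s = 0; s < 5; s++) {
            int pd, dd;

            layout5(2, s, 0, 5, &pd, &dd);
            printf("stripe %ld: pd_idx=%d first-data=%d\n", s, pd, dd);
        }
        return 0;
    }
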
1955 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); in raid5_compute_sector()
1956 qd_idx = pd_idx + 1; in raid5_compute_sector()
1957 if (pd_idx == raid_disks-1) { in raid5_compute_sector()
1960 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
1964 pd_idx = sector_div(stripe2, raid_disks); in raid5_compute_sector()
1965 qd_idx = pd_idx + 1; in raid5_compute_sector()
1966 if (pd_idx == raid_disks-1) { in raid5_compute_sector()
1969 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
1973 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); in raid5_compute_sector()
1974 qd_idx = (pd_idx + 1) % raid_disks; in raid5_compute_sector()
1975 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; in raid5_compute_sector()
1978 pd_idx = sector_div(stripe2, raid_disks); in raid5_compute_sector()
1979 qd_idx = (pd_idx + 1) % raid_disks; in raid5_compute_sector()
1980 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; in raid5_compute_sector()
1984 pd_idx = 0; in raid5_compute_sector()
1989 pd_idx = data_disks; in raid5_compute_sector()
1997 pd_idx = sector_div(stripe2, raid_disks); in raid5_compute_sector()
1998 qd_idx = pd_idx + 1; in raid5_compute_sector()
1999 if (pd_idx == raid_disks-1) { in raid5_compute_sector()
2002 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
2013 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); in raid5_compute_sector()
2014 qd_idx = pd_idx + 1; in raid5_compute_sector()
2015 if (pd_idx == raid_disks-1) { in raid5_compute_sector()
2018 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
2025 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); in raid5_compute_sector()
2026 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; in raid5_compute_sector()
2027 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; in raid5_compute_sector()
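
For level 6 the same rotation yields pd_idx, with Q normally on the next device (qd_idx = pd_idx + 1, wrapping to 0 when P lands on the last disk) and the data index shifted past both; the ROTATING_* cases are DDF-compatible variants of the same idea. A model of the common LEFT_SYMMETRIC case (lines 1973-1975), same assumptions as the previous sketch:

    #include <stdio.h>

    /* Level-6 LEFT_SYMMETRIC: P rotates backwards, Q follows P with
     * wraparound, and data starts two slots after P. */
    static void layout6_left_symmetric(long stripe, int dd, int raid_disks,
                                       int *pd_idx, int *qd_idx, int *dd_idx)
    {
        *pd_idx = raid_disks - 1 - stripe % raid_disks;
        *qd_idx = (*pd_idx + 1) % raid_disks;
        *dd_idx = (*pd_idx + 2 + dd) % raid_disks;
    }

    int main(void)
    {
        for (long s = 0; s < 6; s++) {
            int pd, qd, dd;

            layout6_left_symmetric(s, 0, 6, &pd, &qd, &dd);
            printf("stripe %ld: P=%d Q=%d first-data=%d\n", s, pd, qd, dd);
        }
        return 0;
    }
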
2033 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); in raid5_compute_sector()
2034 if (*dd_idx >= pd_idx) in raid5_compute_sector()
2040 pd_idx = sector_div(stripe2, raid_disks-1); in raid5_compute_sector()
2041 if (*dd_idx >= pd_idx) in raid5_compute_sector()
2047 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); in raid5_compute_sector()
2048 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); in raid5_compute_sector()
2053 pd_idx = sector_div(stripe2, raid_disks-1); in raid5_compute_sector()
2054 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); in raid5_compute_sector()
2059 pd_idx = 0; in raid5_compute_sector()
2071 sh->pd_idx = pd_idx; in raid5_compute_sector()
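
The *_6 algorithms (lines 2033-2059) lay a RAID6 array out as a RAID5 array of raid_disks-1 devices with Q parked on the last device; md uses these when converting between levels 5 and 6. The fixed qd_idx assignment is not among the pd_idx matches above, so treat this as a hedged reconstruction:

    #include <stdio.h>

    /* Model of ALGORITHM_LEFT_SYMMETRIC_6: classic RAID5 rotation over
     * the first raid_disks-1 devices, with Q fixed on the last one. */
    static void layout6_compat(long stripe, int dd, int raid_disks,
                               int *pd_idx, int *qd_idx, int *dd_idx)
    {
        int data_disks = raid_disks - 2;    /* one P, one fixed Q */

        *pd_idx = data_disks - stripe % (raid_disks - 1);
        *dd_idx = (*pd_idx + 1 + dd) % (raid_disks - 1);
        *qd_idx = raid_disks - 1;           /* Q never rotates */
    }

    int main(void)
    {
        for (long s = 0; s < 5; s++) {
            int pd, qd, dd;

            layout6_compat(s, 0, 6, &pd, &qd, &dd);
            printf("stripe %ld: P=%d Q=%d first-data=%d\n", s, pd, qd, dd);
        }
        return 0;
    }
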
2104 if (i == sh->pd_idx) in compute_blocknr()
2112 if (i > sh->pd_idx) in compute_blocknr()
2117 if (i < sh->pd_idx) in compute_blocknr()
2119 i -= (sh->pd_idx + 1); in compute_blocknr()
2138 if (sh->pd_idx == raid_disks-1) in compute_blocknr()
2140 else if (i > sh->pd_idx) in compute_blocknr()
2145 if (sh->pd_idx == raid_disks-1) in compute_blocknr()
2149 if (i < sh->pd_idx) in compute_blocknr()
2151 i -= (sh->pd_idx + 2); in compute_blocknr()
2161 if (sh->pd_idx == 0) in compute_blocknr()
2165 if (i < sh->pd_idx) in compute_blocknr()
2167 i -= (sh->pd_idx + 1); in compute_blocknr()
2172 if (i > sh->pd_idx) in compute_blocknr()
2177 if (i < sh->pd_idx) in compute_blocknr()
2179 i -= (sh->pd_idx + 1); in compute_blocknr()
2195 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx in compute_blocknr()
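
compute_blocknr() inverts raid5_compute_sector(): it undoes the layout rotation to recover which logical block a given device holds within a stripe, and line 2195 re-runs the forward mapping as a consistency check (BUG if they disagree). A round-trip model for the LEFT_SYMMETRIC case:

    #include <assert.h>
    #include <stdio.h>

    /* Forward mapping (LEFT_SYMMETRIC): data-disk index -> device index. */
    static int to_device(long stripe, int dd, int raid_disks, int *pd_idx)
    {
        *pd_idx = (raid_disks - 1) - stripe % raid_disks;
        return (*pd_idx + 1 + dd) % raid_disks;
    }

    /* Inverse mapping, as compute_blocknr() does it: wrap the device
     * index past the parity disk, then subtract the rotation. */
    static int to_data_disk(int i, int pd_idx, int raid_disks)
    {
        if (i < pd_idx)
            i += raid_disks;
        return i - (pd_idx + 1);
    }

    int main(void)
    {
        int raid_disks = 5;

        for (long s = 0; s < 7; s++)
            for (int dd = 0; dd < raid_disks - 1; dd++) {
                int pd, dev = to_device(s, dd, raid_disks, &pd);

                assert(dev != pd);
                /* The equivalent of the check at line 2195: the
                 * round trip must reproduce dd. */
                assert(to_data_disk(dev, pd, raid_disks) == dd);
            }
        printf("round trip ok\n");
        return 0;
    }
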
2209 int i, pd_idx = sh->pd_idx, disks = sh->disks; in schedule_reconstruction() local
2242 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || in schedule_reconstruction()
2243 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); in schedule_reconstruction()
2252 if (i == pd_idx) in schedule_reconstruction()
2269 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); in schedule_reconstruction()
2270 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in schedule_reconstruction()
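
The BUG_ON at lines 2242-2243 states the read-modify-write precondition: the prexor pass consumes the old parity, so dev[pd_idx] must be R5_UPTODATE (or already queued for compute via R5_Wantcompute) before the drain begins; lines 2269-2270 then lock the parity device and mark it stale until the new P lands. A tiny flags model of that invariant, with the flag names reused purely for illustration:

    #include <assert.h>
    #include <stdio.h>

    #define R5_LOCKED       (1u << 0)
    #define R5_UPTODATE     (1u << 1)
    #define R5_Wantcompute  (1u << 2)

    /* Model of the parity-device handling when scheduling an RMW:
     * assert the precondition, then mark P locked and stale. */
    static void schedule_rmw_parity(unsigned *pd_flags)
    {
        assert(*pd_flags & (R5_UPTODATE | R5_Wantcompute));
        *pd_flags |= R5_LOCKED;
        *pd_flags &= ~R5_UPTODATE;
    }

    int main(void)
    {
        unsigned pd_flags = R5_UPTODATE;

        schedule_rmw_parity(&pd_flags);
        assert((pd_flags & (R5_LOCKED | R5_UPTODATE)) == R5_LOCKED);
        printf("parity locked for reconstruct\n");
        return 0;
    }
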
2716 if ((dev->towrite || i == sh->pd_idx) && in handle_stripe_dirtying()
2726 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && in handle_stripe_dirtying()
2742 if ((dev->towrite || i == sh->pd_idx) && in handle_stripe_dirtying()
2766 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
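
handle_stripe_dirtying() prices the two parity-update strategies: read-modify-write must read each block being written plus old P (the dev->towrite || i == pd_idx tests at 2716/2742), while reconstruct-write must read every data block that is not fully overwritten (2726/2766); whichever needs fewer reads wins. A model with simplified per-device state:

    #include <stdio.h>

    struct dev_model {
        int towrite;    /* a write is pending for this block */
        int overwrite;  /* the write covers the whole block  */
        int uptodate;   /* block already valid in the cache  */
    };

    /* Count the reads each strategy would issue, as in
     * handle_stripe_dirtying(), and report the cheaper one. */
    static const char *pick_strategy(struct dev_model *dev, int disks,
                                     int pd_idx)
    {
        int rmw = 0, rcw = 0;

        for (int i = 0; i < disks; i++) {
            /* RMW reads old data for written blocks, plus old P. */
            if ((dev[i].towrite || i == pd_idx) && !dev[i].uptodate)
                rmw++;
            /* RCW reads every non-overwritten data block. */
            if (!dev[i].overwrite && i != pd_idx && !dev[i].uptodate)
                rcw++;
        }
        return rmw < rcw ? "read-modify-write" : "reconstruct-write";
    }

    int main(void)
    {
        /* One small write into a 5-disk stripe (P on disk 4):
         * RMW reads 2 blocks, RCW would read 4. */
        struct dev_model dev[5] = { [1] = { .towrite = 1 } };

        printf("%s\n", pick_strategy(dev, 5, 4));
        return 0;
    }
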
2817 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
2826 dev = &sh->dev[sh->pd_idx]; in handle_parity_checks5()
2873 &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
2874 sh->ops.target = sh->pd_idx; in handle_parity_checks5()
2895 int pd_idx = sh->pd_idx; in handle_parity_checks6() local
2934 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in handle_parity_checks6()
2973 dev = &sh->dev[pd_idx]; in handle_parity_checks6()
3030 &sh->dev[pd_idx].flags); in handle_parity_checks6()
3031 *target = pd_idx; in handle_parity_checks6()
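
Both check paths share one shape: run an asynchronous parity check, and if the result is nonzero, flag the parity block for recompute (R5_Wantcompute on dev[pd_idx] with pd_idx as the compute target, lines 2873-2874 and 3030-3031) so a repaired P gets written back; the RAID6 path does the same for Q through the syndrome engines. A loose state-machine model of the P case, state names simplified from the kernel's:

    #include <stdio.h>

    enum check_state { idle, run, check_result, compute_run, done };

    /* Toy walk through handle_parity_checks5(): a nonzero xor result
     * sends the stripe to the compute path with pd_idx as the target,
     * a zero result finishes the check. */
    static enum check_state step(enum check_state st, int xor_result,
                                 int pd_idx, int *compute_target)
    {
        switch (st) {
        case idle:
            return run;             /* issue the async parity check */
        case run:
            return check_result;    /* wait for the xor sum */
        case check_result:
            if (xor_result == 0)
                return done;        /* parity consistent */
            *compute_target = pd_idx;   /* schedule P recompute */
            return compute_run;
        case compute_run:
            return done;            /* repaired P gets written out */
        default:
            return done;
        }
    }

    int main(void)
    {
        enum check_state st = idle;
        int target = -1;

        while (st != done)
            st = step(st, 1 /* pretend mismatch */, 4, &target);
        printf("compute target = %d\n", target);
        return 0;
    }
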
3064 if (i != sh->pd_idx && i != sh->qd_idx) { in handle_stripe_expansion()
3095 if (j != sh2->pd_idx && in handle_stripe_expansion()
3337 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, in handle_stripe()
3383 pdev = &sh->dev[sh->pd_idx]; in handle_stripe()
3384 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) in handle_stripe()
3385 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); in handle_stripe()
3424 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); in handle_stripe()
3430 (i == sh->pd_idx || i == sh->qd_idx || in handle_stripe()
3437 ((i == sh->pd_idx || i == sh->qd_idx) && in handle_stripe()
4241 if (j == sh->pd_idx) in reshape_request()