Lines Matching refs:sector

127 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size); in drbd_endio_write_sec_final()
157 drbd_rs_complete_io(device, i.sector); in drbd_endio_write_sec_final()
183 (unsigned long long)peer_req->i.sector); in drbd_peer_request_endio()
356 sector_t sector = peer_req->i.sector; in w_e_send_csum() local
367 err = drbd_send_drequest_csum(peer_device, sector, size, in w_e_send_csum()
387 static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, int size) in read_for_csum() argument
397 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector, in read_for_csum()
588 sector_t sector; in make_resync_request() local
654 sector = BM_BIT_TO_SECT(bit); in make_resync_request()
656 if (drbd_try_rs_begin_io(device, sector)) { in make_resync_request()
663 drbd_rs_complete_io(device, sector); in make_resync_request()
681 if (sector & ((1<<(align+3))-1)) in make_resync_request()
710 if (sector + (size>>9) > capacity) in make_resync_request()
711 size = (capacity-sector)<<9; in make_resync_request()
714 switch (read_for_csum(peer_device, sector, size)) { in make_resync_request()
719 drbd_rs_complete_io(device, sector); in make_resync_request()
720 device->bm_resync_fo = BM_SECT_TO_BIT(sector); in make_resync_request()
735 sector, size, ID_SYNCER); in make_resync_request()
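
Note: the make_resync_request() hits above all revolve around one unit convention: bitmap bits are converted to 512-byte sectors and back (BM_BIT_TO_SECT()/BM_SECT_TO_BIT()), request sizes are kept in bytes (hence size>>9 and <<9), and a request is clamped so it never runs past the device capacity. A minimal stand-alone sketch of that arithmetic follows; the 4 KiB-per-bit granularity and the macro definitions are assumptions for illustration, not taken from the listing.

#include <stdio.h>

typedef unsigned long long sector_t;

/*
 * Assumed granularity: one bitmap bit covers 4 KiB, i.e. 8 sectors of
 * 512 bytes.  These macros mirror the BM_BIT_TO_SECT()/BM_SECT_TO_BIT()
 * convention visible in the hits, but the exact values are illustrative.
 */
#define BM_BLOCK_SHIFT		12
#define BM_BIT_TO_SECT(b)	((sector_t)(b) << (BM_BLOCK_SHIFT - 9))
#define BM_SECT_TO_BIT(s)	((unsigned long)((s) >> (BM_BLOCK_SHIFT - 9)))

/* Clamp a request (size in bytes) so it does not extend past 'capacity' sectors. */
static int clamp_to_capacity(sector_t sector, int size, sector_t capacity)
{
	if (sector + (size >> 9) > capacity)
		size = (capacity - sector) << 9;	/* remaining sectors, back to bytes */
	return size;
}

int main(void)
{
	sector_t capacity = 100;			/* a tiny 50 KiB "device" */
	sector_t sector = BM_BIT_TO_SECT(12);		/* bit 12 -> sector 96 */
	int size = clamp_to_capacity(sector, 4096, capacity);

	printf("sector %llu, clamped size %d bytes, bit %lu\n",
	       sector, size, BM_SECT_TO_BIT(sector));
	return 0;
}
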
766 sector_t sector; in make_ov_request() local
775 sector = device->ov_position; in make_ov_request()
777 if (sector >= capacity) in make_ov_request()
785 && sector >= device->ov_stop_sector; in make_ov_request()
791 if (drbd_try_rs_begin_io(device, sector)) { in make_ov_request()
792 device->ov_position = sector; in make_ov_request()
796 if (sector + (size>>9) > capacity) in make_ov_request()
797 size = (capacity-sector)<<9; in make_ov_request()
800 if (drbd_send_ov_request(first_peer_device(device), sector, size)) { in make_ov_request()
804 sector += BM_SECT_PER_BIT; in make_ov_request()
806 device->ov_position = sector; in make_ov_request()
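
Note: the make_ov_request() hits above track the online-verify cursor: device->ov_position holds the next sector to check, each request is clamped at capacity, and the cursor advances by BM_SECT_PER_BIT per issued request, being written back whenever the function has to stop early. A loose, self-contained sketch of that stepping, assuming the same 8-sectors-per-bit granularity as above; apart from ov_position, ov_stop_sector and BM_SECT_PER_BIT the names are placeholders, and the stop-sector handling is simplified.

#include <stdio.h>

typedef unsigned long long sector_t;

#define BM_SECT_PER_BIT	8ULL	/* assumed: one bitmap bit == 8 sectors (4 KiB) */

struct ov_cursor {
	sector_t ov_position;		/* next sector to verify */
	sector_t ov_stop_sector;	/* early-stop point, simplified here */
	sector_t capacity;		/* device size in sectors */
};

/*
 * Issue up to 'want' verify requests and remember where the cursor stopped.
 * issue_request() stands in for drbd_send_ov_request(); a nonzero return
 * means "could not send, retry later", as in the listing.
 */
static int make_ov_requests(struct ov_cursor *c, int want,
			    int (*issue_request)(sector_t sector, int size))
{
	int issued = 0;
	sector_t sector = c->ov_position;

	while (issued < want && sector < c->capacity && sector < c->ov_stop_sector) {
		int size = 4096;			/* one bitmap bit's worth */

		if (sector + (size >> 9) > c->capacity)
			size = (c->capacity - sector) << 9;
		if (issue_request(sector, size))
			break;				/* back off, keep the position */
		sector += BM_SECT_PER_BIT;
		issued++;
	}
	c->ov_position = sector;			/* resume here next time */
	return issued;
}

static int print_request(sector_t sector, int size)
{
	printf("verify request: sector %llu, %d bytes\n", sector, size);
	return 0;					/* pretend sending always succeeds */
}

int main(void)
{
	struct ov_cursor c = { .ov_position = 0, .ov_stop_sector = 40, .capacity = 100 };

	make_ov_requests(&c, 8, print_request);
	printf("cursor now at sector %llu\n", c.ov_position);
	return 0;
}
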
1067 (unsigned long long)peer_req->i.sector); in w_e_end_data_req()
1124 drbd_rs_complete_io(device, peer_req->i.sector); in w_e_end_rsdata_req()
1146 (unsigned long long)peer_req->i.sector); in w_e_end_rsdata_req()
1151 drbd_rs_failed_io(device, peer_req->i.sector, peer_req->i.size); in w_e_end_rsdata_req()
1180 drbd_rs_complete_io(device, peer_req->i.sector); in w_e_end_csum_rs_req()
1202 drbd_set_in_sync(device, peer_req->i.sector, peer_req->i.size); in w_e_end_csum_rs_req()
1232 sector_t sector = peer_req->i.sector; in w_e_end_ov_req() local
1261 err = drbd_send_drequest_csum(peer_device, sector, size, digest, digest_size, P_OV_REPLY); in w_e_end_ov_req()
1273 void drbd_ov_out_of_sync_found(struct drbd_device *device, sector_t sector, int size) in drbd_ov_out_of_sync_found() argument
1275 if (device->ov_last_oos_start + device->ov_last_oos_size == sector) { in drbd_ov_out_of_sync_found()
1278 device->ov_last_oos_start = sector; in drbd_ov_out_of_sync_found()
1281 drbd_set_out_of_sync(device, sector, size); in drbd_ov_out_of_sync_found()
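
Note: the drbd_ov_out_of_sync_found() hits above show how online verify coalesces out-of-sync ranges: when a newly found range starts exactly where the last recorded one ended, the previous range is extended, otherwise a fresh range is started, and the blocks are marked out of sync either way. A small stand-alone sketch of that coalescing; sizes are kept in sectors here for simplicity (the kernel function takes size in bytes) and the drbd_set_out_of_sync() call is only hinted at in a comment.

#include <stdio.h>

typedef unsigned long long sector_t;

/* Minimal stand-in for the two bookkeeping fields used by the function above. */
struct oos_tracker {
	sector_t last_oos_start;
	sector_t last_oos_size;
};

/*
 * Coalesce an out-of-sync range with the previous one when they are adjacent,
 * mirroring the ov_last_oos_start/ov_last_oos_size handling seen in the hits.
 */
static void record_oos(struct oos_tracker *t, sector_t sector, sector_t size)
{
	if (t->last_oos_start + t->last_oos_size == sector) {
		t->last_oos_size += size;		/* extends the previous range */
	} else {
		t->last_oos_start = sector;		/* a new, separate range */
		t->last_oos_size = size;
	}
	/* the real function also marks the blocks via drbd_set_out_of_sync() */
}

int main(void)
{
	struct oos_tracker t = { 0, 0 };

	record_oos(&t, 8, 8);	/* first range: sectors 8..15 */
	record_oos(&t, 16, 8);	/* adjacent: merged into 8..23 */
	record_oos(&t, 64, 8);	/* gap: starts a fresh range at 64 */
	printf("last range: start %llu, size %llu sectors\n",
	       t.last_oos_start, t.last_oos_size);
	return 0;
}
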
1291 sector_t sector = peer_req->i.sector; in w_e_end_ov_reply() local
1306 drbd_rs_complete_io(device, peer_req->i.sector); in w_e_end_ov_reply()
1331 drbd_ov_out_of_sync_found(device, sector, size); in w_e_end_ov_reply()
1335 err = drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, in w_e_end_ov_reply()
1347 (sector + (size>>9)) >= device->ov_stop_sector; in w_e_end_ov_reply()
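
Note: the last w_e_end_ov_reply() hit is the visible half of the "stop sector reached" test: a request of size bytes starting at sector ends at sector + size/512, and once that end reaches device->ov_stop_sector the verify run can stop. As a tiny helper (hypothetical name, reproducing only the part of the condition shown in the listing):

typedef unsigned long long sector_t;

/* True once a completed request [sector, sector + size/512) reaches the stop sector. */
static int ov_stop_sector_reached(sector_t sector, int size, sector_t ov_stop_sector)
{
	return sector + (size >> 9) >= ov_stop_sector;
}
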
1502 err = drbd_send_drequest(peer_device, P_DATA_REQUEST, req->i.sector, req->i.size, in w_send_read_req()