Lines matching refs: sector
361 drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector, in drbd_alloc_peer_req() argument
390 peer_req->i.sector = sector; in drbd_alloc_peer_req()
1600 if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector, in drbd_issue_peer_discard_or_zero_out()
1629 sector_t sector = peer_req->i.sector; in drbd_submit_peer_request() local
1672 bio->bi_iter.bi_sector = sector; in drbd_submit_peer_request()
1685 sector += len >> 9; in drbd_submit_peer_request()
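The drbd_submit_peer_request() hits above show the recurring 512-byte sector arithmetic: each bio gets its start sector in bi_iter.bi_sector, and the running position then advances by the chunk length shifted right by 9. A minimal user-space sketch of that stepping (not DRBD code; the lengths, chunk size, and names below are illustrative):

/*
 * Sketch of "sector += len >> 9": a payload split across several bios
 * advances its start sector by the byte length divided by 512.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;      /* 512-byte sector index, as in the kernel */

#define SECTOR_SHIFT 9          /* 1 << 9 == 512 bytes per sector */

int main(void)
{
        sector_t sector = 2048;              /* hypothetical start, 1 MiB in */
        unsigned int remaining = 3 * 4096;   /* hypothetical payload in bytes */
        unsigned int max_chunk = 4096;       /* pretend each bio carries one page */

        while (remaining) {
                unsigned int len = remaining < max_chunk ? remaining : max_chunk;

                printf("bio: bi_sector=%llu len=%u bytes\n",
                       (unsigned long long)sector, len);

                sector += len >> SECTOR_SHIFT;   /* same step as "sector += len >> 9" */
                remaining -= len;
        }
        return 0;
}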
1825 read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, in read_in_block() argument
1875 if (sector + (ds>>9) > capacity) { in read_in_block()
1879 (unsigned long long)sector, ds); in read_in_block()
1886 peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO); in read_in_block()
1923 (unsigned long long)sector, data_size); in read_in_block()
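read_in_block() above rejects a request whose payload would run past the end of the device: capacity is counted in 512-byte sectors while ds is a byte count. A standalone sketch of that bounds test; the helper name and the device size used here are illustrative, not taken from DRBD:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Hypothetical helper: does [sector, sector + ds bytes) fit the device? */
static bool request_in_bounds(sector_t sector, unsigned int ds, sector_t capacity)
{
        return sector + (ds >> 9) <= capacity;
}

int main(void)
{
        sector_t capacity = 1 << 21;    /* hypothetical 1 GiB device: 2^21 sectors */

        printf("%d\n", request_in_bounds(capacity - 8, 4096, capacity));  /* 1: exactly fits */
        printf("%d\n", request_in_bounds(capacity - 7, 4096, capacity));  /* 0: one sector too far */
        return 0;
}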
1961 sector_t sector, int data_size) in recv_dless_read() argument
1984 D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector); in recv_dless_read()
2018 sector_t sector = peer_req->i.sector; in e_end_resync_block() local
2024 drbd_set_in_sync(device, sector, peer_req->i.size); in e_end_resync_block()
2028 drbd_rs_failed_io(device, sector, peer_req->i.size); in e_end_resync_block()
2037 static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector, in recv_resync_read() argument
2043 peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi); in recv_resync_read()
2079 sector_t sector, bool missing_ok, const char *func) in find_request() argument
2085 if (drbd_contains_interval(root, sector, &req->i) && req->i.local) in find_request()
2089 (unsigned long)id, (unsigned long long)sector); in find_request()
2099 sector_t sector; in receive_DataReply() local
2108 sector = be64_to_cpu(p->sector); in receive_DataReply()
2111 req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__); in receive_DataReply()
2116 err = recv_dless_read(peer_device, req, sector, pi->size); in receive_DataReply()
2130 sector_t sector; in receive_RSDataReply() local
2139 sector = be64_to_cpu(p->sector); in receive_RSDataReply()
2146 err = recv_resync_read(peer_device, sector, pi); in receive_RSDataReply()
2162 sector_t sector, int size) in restart_conflicting_writes() argument
2167 drbd_for_each_overlap(i, &device->write_requests, sector, size) { in restart_conflicting_writes()
2189 sector_t sector = peer_req->i.sector; in e_end_block() local
2200 drbd_set_in_sync(device, sector, peer_req->i.size); in e_end_block()
2216 restart_conflicting_writes(device, sector, peer_req->i.size); in e_end_block()
2298 if (overlaps(peer_req->i.sector, peer_req->i.size, in overlapping_resync_write()
2299 rs_req->i.sector, rs_req->i.size)) { in overlapping_resync_write()
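overlapping_resync_write() above compares two intervals, each given as a start sector plus a size. The sketch below shows a generic half-open overlap test in user space; treating the size as a byte count and shifting it by 9 is an assumption made here for illustration, not a claim about the overlaps() macro itself:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static bool sectors_overlap(sector_t s1, unsigned int size1_bytes,
                            sector_t s2, unsigned int size2_bytes)
{
        sector_t e1 = s1 + (size1_bytes >> 9);   /* exclusive end of interval 1 */
        sector_t e2 = s2 + (size2_bytes >> 9);   /* exclusive end of interval 2 */

        /* Two half-open ranges [s, e) overlap iff each starts before the other ends. */
        return s1 < e2 && s2 < e1;
}

int main(void)
{
        /* [0, 8 sectors) vs [4 sectors, 12 sectors): overlap expected */
        printf("%d\n", sectors_overlap(0, 4096, 4, 4096));   /* 1 */
        /* [0, 8 sectors) vs [8 sectors, 16 sectors): adjacent, no overlap */
        printf("%d\n", sectors_overlap(0, 4096, 8, 4096));   /* 0 */
        return 0;
}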
2398 static void fail_postponed_requests(struct drbd_device *device, sector_t sector, in fail_postponed_requests() argument
2404 drbd_for_each_overlap(i, &device->write_requests, sector, size) { in fail_postponed_requests()
2428 sector_t sector = peer_req->i.sector; in handle_write_conflicts() local
2441 drbd_for_each_overlap(i, &device->write_requests, sector, size) { in handle_write_conflicts()
2459 equal = i->sector == sector && i->size == size; in handle_write_conflicts()
2467 bool superseded = i->sector <= sector && i->sector + in handle_write_conflicts()
2468 (i->size >> 9) >= sector + (size >> 9); in handle_write_conflicts()
2474 (unsigned long long)i->sector, i->size, in handle_write_conflicts()
2475 (unsigned long long)sector, size, in handle_write_conflicts()
2492 (unsigned long long)i->sector, i->size, in handle_write_conflicts()
2493 (unsigned long long)sector, size); in handle_write_conflicts()
2511 fail_postponed_requests(device, sector, size); in handle_write_conflicts()
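The superseded check in handle_write_conflicts() above (source lines 2467-2468) treats an incoming write as covered when an already-queued write starts no later and ends no earlier, with sizes in bytes and 512-byte sectors. Sketched standalone below; the function name and sample ranges are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

static bool fully_covers(sector_t i_sector, unsigned int i_size,
                         sector_t sector, unsigned int size)
{
        return i_sector <= sector &&
               i_sector + (i_size >> 9) >= sector + (size >> 9);
}

int main(void)
{
        /* existing write: sectors [0, 24); incoming: sectors [8, 16) -> covered */
        printf("%d\n", fully_covers(0, 12288, 8, 4096));   /* 1 */
        /* incoming write extends past the existing one -> not covered */
        printf("%d\n", fully_covers(0, 12288, 16, 8192));  /* 0 */
        return 0;
}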
2537 sector_t sector; in receive_Data() local
2569 sector = be64_to_cpu(p->sector); in receive_Data()
2570 peer_req = read_in_block(peer_device, p->block_id, sector, pi); in receive_Data()
2674 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size); in receive_Data()
2714 bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector, in drbd_rs_should_slow_down() argument
2724 tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector)); in drbd_rs_should_slow_down()
2787 sector_t sector; in receive_DataRequest() local
2801 sector = be64_to_cpu(p->sector); in receive_DataRequest()
2806 (unsigned long long)sector, size); in receive_DataRequest()
2809 if (sector + (size>>9) > capacity) { in receive_DataRequest()
2811 (unsigned long long)sector, size); in receive_DataRequest()
2830 drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC); in receive_DataRequest()
2846 peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size, in receive_DataRequest()
2872 device->bm_resync_fo = BM_SECT_TO_BIT(sector); in receive_DataRequest()
2895 device->bm_resync_fo = BM_SECT_TO_BIT(sector); in receive_DataRequest()
2914 device->ov_start_sector = sector; in receive_DataRequest()
2915 device->ov_position = sector; in receive_DataRequest()
2916 device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector); in receive_DataRequest()
2923 (unsigned long long)sector); in receive_DataRequest()
2966 && drbd_rs_should_slow_down(device, sector, false)) in receive_DataRequest()
2969 if (drbd_rs_begin_io(device, sector)) in receive_DataRequest()
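receive_DataRequest() above maps a sector to a resync-bitmap bit via BM_SECT_TO_BIT(). The sketch below assumes a 4 KiB bitmap granularity, i.e. one bit per eight 512-byte sectors; the authoritative shift is BM_BLOCK_SHIFT in drbd_int.h and should be checked there:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

#define ASSUMED_BM_BLOCK_SHIFT 12   /* assumption: 4 KiB covered per bitmap bit */

static unsigned long sect_to_bit(sector_t sector)
{
        return sector >> (ASSUMED_BM_BLOCK_SHIFT - 9);   /* 8 sectors per bit */
}

int main(void)
{
        printf("%lu\n", sect_to_bit(0));     /* bit 0 */
        printf("%lu\n", sect_to_bit(8));     /* bit 1: next 4 KiB block */
        printf("%lu\n", sect_to_bit(4096));  /* bit 512 */
        return 0;
}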
4925 drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); in receive_out_of_sync()
4935 sector_t sector; in receive_rs_deallocated() local
4943 sector = be64_to_cpu(p->sector); in receive_rs_deallocated()
4952 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector, in receive_rs_deallocated()
4988 drbd_rs_complete_io(device, sector); in receive_rs_deallocated()
4989 drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER); in receive_rs_deallocated()
5625 sector_t sector = be64_to_cpu(p->sector); in got_IsInSync() local
5638 drbd_rs_complete_io(device, sector); in got_IsInSync()
5639 drbd_set_in_sync(device, sector, blksize); in got_IsInSync()
5651 validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector, in validate_req_change_req_state() argument
5659 req = find_request(device, root, id, sector, missing_ok, func); in validate_req_change_req_state()
5677 sector_t sector = be64_to_cpu(p->sector); in got_BlockAck() local
5689 drbd_set_in_sync(device, sector, blksize); in got_BlockAck()
5713 return validate_req_change_req_state(device, p->block_id, sector, in got_BlockAck()
5723 sector_t sector = be64_to_cpu(p->sector); in got_NegAck() local
5736 drbd_rs_failed_io(device, sector, size); in got_NegAck()
5740 err = validate_req_change_req_state(device, p->block_id, sector, in got_NegAck()
5749 drbd_set_out_of_sync(device, sector, size); in got_NegAck()
5759 sector_t sector = be64_to_cpu(p->sector); in got_NegDReply() local
5769 (unsigned long long)sector, be32_to_cpu(p->blksize)); in got_NegDReply()
5771 return validate_req_change_req_state(device, p->block_id, sector, in got_NegDReply()
5780 sector_t sector; in got_NegRSDReply() local
5789 sector = be64_to_cpu(p->sector); in got_NegRSDReply()
5797 drbd_rs_complete_io(device, sector); in got_NegRSDReply()
5800 drbd_rs_failed_io(device, sector, size); in got_NegRSDReply()
5843 sector_t sector; in got_OVResult() local
5851 sector = be64_to_cpu(p->sector); in got_OVResult()
5857 drbd_ov_out_of_sync_found(device, sector, size); in got_OVResult()
5864 drbd_rs_complete_io(device, sector); in got_OVResult()
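The got_*() handlers above all decode the sector field from the wire with be64_to_cpu(): the peer sends it big-endian and the receiver converts it to host order. A portable user-space equivalent of that decoding step; the example byte sequence is hypothetical:

#include <stdint.h>
#include <stdio.h>

static uint64_t be64_to_host(const unsigned char b[8])
{
        uint64_t v = 0;
        for (int i = 0; i < 8; i++)
                v = (v << 8) | b[i];    /* most significant byte first */
        return v;
}

int main(void)
{
        /* hypothetical on-wire encoding of sector 4096 */
        const unsigned char wire[8] = { 0, 0, 0, 0, 0, 0, 0x10, 0x00 };

        printf("sector = %llu\n", (unsigned long long)be64_to_host(wire));
        return 0;
}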