Lines Matching refs:sector

360 drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector,  in drbd_alloc_peer_req()  argument
389 peer_req->i.sector = sector; in drbd_alloc_peer_req()
1602 if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector, in drbd_issue_peer_discard_or_zero_out()
1640 sector_t sector = peer_req->i.sector; in drbd_submit_peer_request() local
1694 bio->bi_iter.bi_sector = sector; in drbd_submit_peer_request()
1707 sector += len >> 9; in drbd_submit_peer_request()
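
The drbd_submit_peer_request() references above show the submit loop advancing the target sector by len >> 9 as each chunk of the peer request is attached to a bio, i.e. the chunk length in bytes converted to 512-byte sectors. A minimal stand-alone sketch of that arithmetic; submit_chunk() and the 4 KiB chunk limit are hypothetical stand-ins for the kernel bio machinery, not DRBD's actual code:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;      /* simplified stand-in for the kernel's sector_t */
    #define SECTOR_SHIFT 9          /* 512-byte sectors, hence the ">> 9" in the listing */

    /* Hypothetical helper: pretend to queue one chunk of a peer request. */
    static void submit_chunk(sector_t sector, unsigned int len)
    {
        printf("chunk at sector %llu, %u bytes\n",
               (unsigned long long)sector, len);
    }

    int main(void)
    {
        sector_t sector = 2048;         /* plays the role of peer_req->i.sector */
        unsigned int remaining = 3 * 4096;
        unsigned int max_chunk = 4096;  /* pretend one bio takes at most 4 KiB */

        while (remaining) {
            unsigned int len = remaining < max_chunk ? remaining : max_chunk;

            submit_chunk(sector, len);
            sector += len >> SECTOR_SHIFT;  /* the same step the listing shows */
            remaining -= len;
        }
        return 0;
    }
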
1847 read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, in read_in_block() argument
1897 if (sector + (ds>>9) > capacity) { in read_in_block()
1901 (unsigned long long)sector, ds); in read_in_block()
1908 peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO); in read_in_block()
1945 (unsigned long long)sector, data_size); in read_in_block()
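
read_in_block() rejects a request whose end would fall beyond the device capacity: both sides of the comparison are in 512-byte sectors, so the byte count is shifted down by 9 first, and the failing case logs the offending sector and size. A hedged sketch of the same bounds check with plain integer types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /*
     * Sketch of the check in read_in_block(): a request starting at 'sector'
     * and spanning 'bytes' must not extend past 'capacity', the device size
     * in 512-byte sectors.
     */
    static bool request_in_bounds(sector_t sector, unsigned int bytes, sector_t capacity)
    {
        return sector + (bytes >> 9) <= capacity;
    }

    int main(void)
    {
        sector_t capacity = 2048;   /* a 1 MiB device, in 512-byte sectors */

        printf("%d\n", request_in_bounds(2040, 4096, capacity));  /* 1: fits exactly */
        printf("%d\n", request_in_bounds(2044, 4096, capacity));  /* 0: runs past the end */
        return 0;
    }
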
1983 sector_t sector, int data_size) in recv_dless_read() argument
2006 D_ASSERT(peer_device->device, sector == bio->bi_iter.bi_sector); in recv_dless_read()
2040 sector_t sector = peer_req->i.sector; in e_end_resync_block() local
2046 drbd_set_in_sync(peer_device, sector, peer_req->i.size); in e_end_resync_block()
2050 drbd_rs_failed_io(peer_device, sector, peer_req->i.size); in e_end_resync_block()
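
e_end_resync_block() is the completion side of a resync write: the block is either marked in sync or recorded as a failed resync I/O, using the interval's sector and size. A minimal sketch of that branch, assuming success/failure is signalled by a flag; mark_in_sync() and mark_resync_failed() are hypothetical stand-ins for drbd_set_in_sync() and drbd_rs_failed_io():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Hypothetical stand-ins for the bitmap updates named in the listing. */
    static void mark_in_sync(sector_t sector, unsigned int size)
    {
        printf("in sync: sector %llu, %u bytes\n", (unsigned long long)sector, size);
    }

    static void mark_resync_failed(sector_t sector, unsigned int size)
    {
        printf("resync failed: sector %llu, %u bytes\n", (unsigned long long)sector, size);
    }

    /* Completion pattern sketched from e_end_resync_block(). */
    static void end_resync_block(sector_t sector, unsigned int size, bool io_error)
    {
        if (!io_error)
            mark_in_sync(sector, size);
        else
            mark_resync_failed(sector, size);
    }

    int main(void)
    {
        end_resync_block(4096, 4096, false);
        end_resync_block(8192, 4096, true);
        return 0;
    }
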
2059 static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector, in recv_resync_read() argument
2065 peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi); in recv_resync_read()
2101 sector_t sector, bool missing_ok, const char *func) in find_request() argument
2107 if (drbd_contains_interval(root, sector, &req->i) && req->i.local) in find_request()
2111 (unsigned long)id, (unsigned long long)sector); in find_request()
2121 sector_t sector; in receive_DataReply() local
2130 sector = be64_to_cpu(p->sector); in receive_DataReply()
2133 req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__); in receive_DataReply()
2138 err = recv_dless_read(peer_device, req, sector, pi->size); in receive_DataReply()
2152 sector_t sector; in receive_RSDataReply() local
2161 sector = be64_to_cpu(p->sector); in receive_RSDataReply()
2168 err = recv_resync_read(peer_device, sector, pi); in receive_RSDataReply()
2184 sector_t sector, int size) in restart_conflicting_writes() argument
2189 drbd_for_each_overlap(i, &device->write_requests, sector, size) { in restart_conflicting_writes()
2211 sector_t sector = peer_req->i.sector; in e_end_block() local
2222 drbd_set_in_sync(peer_device, sector, peer_req->i.size); in e_end_block()
2238 restart_conflicting_writes(device, sector, peer_req->i.size); in e_end_block()
2320 if (overlaps(peer_req->i.sector, peer_req->i.size, in overlapping_resync_write()
2321 rs_req->i.sector, rs_req->i.size)) { in overlapping_resync_write()
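
overlapping_resync_write() tests whether the incoming peer write touches the same sector range as an in-flight resync request; each interval is a start sector plus a size in bytes. A sketch of an overlap test under that convention (DRBD's real overlaps() helper is defined elsewhere; this only illustrates the arithmetic):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    /*
     * Two half-open sector ranges overlap iff each one starts before the
     * other one ends.  Starts are in 512-byte sectors, sizes in bytes,
     * matching the i.sector / i.size pairs in the listing.
     */
    static bool ranges_overlap(sector_t s1, unsigned int l1, sector_t s2, unsigned int l2)
    {
        return s1 + (l1 >> 9) > s2 && s2 + (l2 >> 9) > s1;
    }

    int main(void)
    {
        /* [0, 8) and [4, 12) in sectors (sizes given in bytes) do overlap */
        return ranges_overlap(0, 4096, 4, 4096) ? 0 : 1;
    }
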
2419 static void fail_postponed_requests(struct drbd_device *device, sector_t sector, in fail_postponed_requests() argument
2426 drbd_for_each_overlap(i, &device->write_requests, sector, size) { in fail_postponed_requests()
2450 sector_t sector = peer_req->i.sector; in handle_write_conflicts() local
2463 drbd_for_each_overlap(i, &device->write_requests, sector, size) { in handle_write_conflicts()
2481 equal = i->sector == sector && i->size == size; in handle_write_conflicts()
2489 bool superseded = i->sector <= sector && i->sector + in handle_write_conflicts()
2490 (i->size >> 9) >= sector + (size >> 9); in handle_write_conflicts()
2496 (unsigned long long)i->sector, i->size, in handle_write_conflicts()
2497 (unsigned long long)sector, size, in handle_write_conflicts()
2514 (unsigned long long)i->sector, i->size, in handle_write_conflicts()
2515 (unsigned long long)sector, size); in handle_write_conflicts()
2533 fail_postponed_requests(device, sector, size); in handle_write_conflicts()
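
handle_write_conflicts() walks every queued write that overlaps the incoming peer write and distinguishes an exactly equal interval, an incoming write that is entirely covered by an already-queued one (the "superseded" test above), and a remaining genuine conflict. A hedged sketch of just that classification, using a simplified interval type in place of struct drbd_interval:

    #include <stdint.h>

    typedef uint64_t sector_t;

    /* Simplified stand-in for struct drbd_interval: start sector + size in bytes. */
    struct ival {
        sector_t sector;
        unsigned int size;
    };

    enum conflict_kind { WRITE_EQUAL, WRITE_SUPERSEDED, WRITE_CONFLICT };

    /*
     * Classification sketched from handle_write_conflicts(): 'queued' is an
     * already-queued overlapping write, 'incoming' the new peer write.
     */
    static enum conflict_kind classify(const struct ival *queued, const struct ival *incoming)
    {
        if (queued->sector == incoming->sector && queued->size == incoming->size)
            return WRITE_EQUAL;

        /* The incoming write lies entirely inside the queued one. */
        if (queued->sector <= incoming->sector &&
            queued->sector + (queued->size >> 9) >=
            incoming->sector + (incoming->size >> 9))
            return WRITE_SUPERSEDED;

        return WRITE_CONFLICT;
    }

    int main(void)
    {
        struct ival queued   = { .sector = 0, .size = 8192 };
        struct ival incoming = { .sector = 2, .size = 4096 };

        /* incoming [2, 10) is fully covered by queued [0, 16): superseded */
        return classify(&queued, &incoming) == WRITE_SUPERSEDED ? 0 : 1;
    }
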
2559 sector_t sector; in receive_Data() local
2589 sector = be64_to_cpu(p->sector); in receive_Data()
2590 peer_req = read_in_block(peer_device, p->block_id, sector, pi); in receive_Data()
2693 drbd_set_out_of_sync(peer_device, peer_req->i.sector, peer_req->i.size); in receive_Data()
2732 bool drbd_rs_should_slow_down(struct drbd_peer_device *peer_device, sector_t sector, in drbd_rs_should_slow_down() argument
2743 tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector)); in drbd_rs_should_slow_down()
2806 sector_t sector; in receive_DataRequest() local
2819 sector = be64_to_cpu(p->sector); in receive_DataRequest()
2824 (unsigned long long)sector, size); in receive_DataRequest()
2827 if (sector + (size>>9) > capacity) { in receive_DataRequest()
2829 (unsigned long long)sector, size); in receive_DataRequest()
2848 drbd_send_ack_ex(peer_device, P_OV_RESULT, sector, size, ID_IN_SYNC); in receive_DataRequest()
2864 peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size, in receive_DataRequest()
2889 device->bm_resync_fo = BM_SECT_TO_BIT(sector); in receive_DataRequest()
2911 device->bm_resync_fo = BM_SECT_TO_BIT(sector); in receive_DataRequest()
2930 device->ov_start_sector = sector; in receive_DataRequest()
2931 device->ov_position = sector; in receive_DataRequest()
2932 device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector); in receive_DataRequest()
2939 (unsigned long long)sector); in receive_DataRequest()
2981 && drbd_rs_should_slow_down(peer_device, sector, false)) in receive_DataRequest()
2984 if (drbd_rs_begin_io(device, sector)) in receive_DataRequest()
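
receive_DataRequest() repeatedly converts a sector into a bitmap bit with BM_SECT_TO_BIT() when updating bm_resync_fo and the online-verify counters. Assuming the usual DRBD bitmap granularity of one bit per 4 KiB block, the conversion is a right shift by the difference between the block shift and the 512-byte sector shift; a sketch under that assumption:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    #define SECTOR_SHIFT    9   /* 512-byte sectors */
    #define BM_BLOCK_SHIFT  12  /* assumed: one bitmap bit covers a 4 KiB block */

    /* Sketch of a BM_SECT_TO_BIT()-style conversion from sector to bitmap bit. */
    static unsigned long sect_to_bit(sector_t sector)
    {
        return (unsigned long)(sector >> (BM_BLOCK_SHIFT - SECTOR_SHIFT));
    }

    int main(void)
    {
        /* sector 2048 (1 MiB into the device) maps to bit 256 under this assumption */
        printf("%lu\n", sect_to_bit(2048));
        return 0;
    }
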
4940 drbd_set_out_of_sync(peer_device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); in receive_out_of_sync()
4950 sector_t sector; in receive_rs_deallocated() local
4958 sector = be64_to_cpu(p->sector); in receive_rs_deallocated()
4966 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER, sector, in receive_rs_deallocated()
5002 drbd_rs_complete_io(device, sector); in receive_rs_deallocated()
5003 drbd_send_ack_ex(peer_device, P_NEG_ACK, sector, size, ID_SYNCER); in receive_rs_deallocated()
5639 sector_t sector = be64_to_cpu(p->sector); in got_IsInSync() local
5652 drbd_rs_complete_io(device, sector); in got_IsInSync()
5653 drbd_set_in_sync(peer_device, sector, blksize); in got_IsInSync()
5665 validate_req_change_req_state(struct drbd_peer_device *peer_device, u64 id, sector_t sector, in validate_req_change_req_state() argument
5674 req = find_request(device, root, id, sector, missing_ok, func); in validate_req_change_req_state()
5692 sector_t sector = be64_to_cpu(p->sector); in got_BlockAck() local
5704 drbd_set_in_sync(peer_device, sector, blksize); in got_BlockAck()
5728 return validate_req_change_req_state(peer_device, p->block_id, sector, in got_BlockAck()
5738 sector_t sector = be64_to_cpu(p->sector); in got_NegAck() local
5751 drbd_rs_failed_io(peer_device, sector, size); in got_NegAck()
5755 err = validate_req_change_req_state(peer_device, p->block_id, sector, in got_NegAck()
5764 drbd_set_out_of_sync(peer_device, sector, size); in got_NegAck()
5774 sector_t sector = be64_to_cpu(p->sector); in got_NegDReply() local
5784 (unsigned long long)sector, be32_to_cpu(p->blksize)); in got_NegDReply()
5786 return validate_req_change_req_state(peer_device, p->block_id, sector, in got_NegDReply()
5795 sector_t sector; in got_NegRSDReply() local
5804 sector = be64_to_cpu(p->sector); in got_NegRSDReply()
5812 drbd_rs_complete_io(device, sector); in got_NegRSDReply()
5815 drbd_rs_failed_io(peer_device, sector, size); in got_NegRSDReply()
5858 sector_t sector; in got_OVResult() local
5866 sector = be64_to_cpu(p->sector); in got_OVResult()
5872 drbd_ov_out_of_sync_found(peer_device, sector, size); in got_OVResult()
5879 drbd_rs_complete_io(device, sector); in got_OVResult()
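
The ack handlers from got_IsInSync() through got_OVResult() all start by decoding the sector and block size from network byte order (be64_to_cpu()/be32_to_cpu()) before touching the bitmap or the request trees. A user-space sketch of that decoding step; struct block_ack is a hypothetical layout that only mirrors the p->sector / p->blksize accesses above, and the glibc byte-order helpers stand in for the kernel ones:

    #include <stdint.h>
    #include <stdio.h>
    #include <endian.h>      /* glibc: be64toh()/htobe64() */
    #include <arpa/inet.h>   /* ntohl()/htonl() */

    typedef uint64_t sector_t;

    /* Hypothetical wire layout mirroring p->sector / p->blksize (packing ignored). */
    struct block_ack {
        uint64_t sector;     /* big-endian on the wire */
        uint32_t blksize;    /* big-endian on the wire */
    };

    int main(void)
    {
        struct block_ack p = { .sector = htobe64(2048), .blksize = htonl(4096) };

        sector_t sector   = be64toh(p.sector);   /* user-space be64_to_cpu() */
        unsigned int size = ntohl(p.blksize);    /* user-space be32_to_cpu() */

        printf("ack for sector %llu, %u bytes\n",
               (unsigned long long)sector, size);
        return 0;
    }
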