/linux-6.1.9/block/ |
D | blk-ia-ranges.c |
     25  return sprintf(buf, "%llu\n", iar->nr_sectors);  in blk_ia_range_nr_sectors_show()
    185  sector < iar->sector + iar->nr_sectors)  in disk_find_ia_range()
    218  swap(iar->nr_sectors, tmp->nr_sectors);  in disk_check_ia_ranges()
    221  sector += iar->nr_sectors;  in disk_check_ia_ranges()
    246  new->ia_range[i].nr_sectors != old->ia_range[i].nr_sectors)  in disk_ia_ranges_changed()
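The disk_find_ia_range() and disk_check_ia_ranges() hits treat each independent access range as a half-open interval [sector, sector + nr_sectors). A minimal userspace sketch of that containment test, assuming a plain array of ranges (struct layout and helper name are illustrative, not the kernel's):

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    struct ia_range {
        sector_t sector;      /* first sector of the range */
        sector_t nr_sectors;  /* length of the range in sectors */
    };

    /* Return the range containing 'sector', or NULL if none does. */
    static struct ia_range *find_ia_range(struct ia_range *ranges, size_t nr,
                                          sector_t sector)
    {
        for (size_t i = 0; i < nr; i++) {
            struct ia_range *iar = &ranges[i];

            if (iar->sector <= sector &&
                sector < iar->sector + iar->nr_sectors)
                return iar;
        }
        return NULL;
    }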
|
D | blk-zoned.c |
    257  sector_t sector, sector_t nr_sectors, gfp_t gfp_mask)  in blkdev_zone_mgmt() argument
    262  sector_t end_sector = sector + nr_sectors;  in blkdev_zone_mgmt()
    283  if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)  in blkdev_zone_mgmt()
    292  if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) {  in blkdev_zone_mgmt()
    375  if (zrange->sector + zrange->nr_sectors <= zrange->sector ||  in blkdev_truncate_zone_range()
    376  zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))  in blkdev_truncate_zone_range()
    381  end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;  in blkdev_truncate_zone_range()
    438  ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,  in blkdev_zone_mgmt_ioctl()
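The alignment test at line 283 is the interesting part: a zone-management range has to cover whole zones, except that the last zone of the device may be shorter. A rough sketch of those checks; it assumes the start must also sit on a zone boundary (the full function enforces this, the excerpt does not show it) and that zone_sectors is a power of two:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    /* Validate a zone-management range against a power-of-two zone size. */
    static bool zone_mgmt_range_ok(sector_t sector, sector_t nr_sectors,
                                   sector_t zone_sectors, sector_t capacity)
    {
        sector_t end_sector = sector + nr_sectors;

        if (!nr_sectors || end_sector > capacity)
            return false;              /* empty, or past the end of the device */
        if (sector & (zone_sectors - 1))
            return false;              /* must start on a zone boundary */
        if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
            return false;              /* partial zone allowed only at the end */
        return true;
    }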
|
D | blk-core.c |
    515  unsigned int nr_sectors = bio_sectors(bio);  in bio_check_eod() local
    517  if (nr_sectors && maxsector &&  in bio_check_eod()
    518  (nr_sectors > maxsector ||  in bio_check_eod()
    519  bio->bi_iter.bi_sector > maxsector - nr_sectors)) {  in bio_check_eod()
    523  bio->bi_iter.bi_sector, nr_sectors, maxsector);  in bio_check_eod()
    554  int nr_sectors = bio_sectors(bio);  in blk_check_zone_append() local
    570  if (nr_sectors > q->limits.chunk_sectors)  in blk_check_zone_append()
    574  if (nr_sectors > q->limits.max_zone_append_sectors)  in blk_check_zone_append()
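bio_check_eod() rejects a bio that runs past the end of the device without ever computing sector + nr_sectors, so the comparison cannot wrap. A small sketch of the same idea (names and types are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    /* Return true if [start, start + nr_sectors) fits inside a device of
     * 'maxsector' sectors, using only subtraction so nothing can overflow. */
    static bool range_within_device(sector_t start, sector_t nr_sectors,
                                    sector_t maxsector)
    {
        if (!nr_sectors || !maxsector)
            return true;               /* nothing to check, as in the excerpt */
        if (nr_sectors > maxsector)
            return false;              /* longer than the whole device */
        if (start > maxsector - nr_sectors)
            return false;              /* would end past the last sector */
        return true;
    }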
|
/linux-6.1.9/drivers/md/bcache/ |
D | writeback.h |
     80  unsigned int nr_sectors)  in bcache_dev_stripe_dirty() argument
     91  if (nr_sectors <= dc->disk.stripe_size)  in bcache_dev_stripe_dirty()
     94  nr_sectors -= dc->disk.stripe_size;  in bcache_dev_stripe_dirty()
    149  uint64_t offset, int nr_sectors);
|
D | writeback.c |
    597  uint64_t offset, int nr_sectors)  in bcache_dev_sectors_dirty_add() argument
    611  atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);  in bcache_dev_sectors_dirty_add()
    615  while (nr_sectors) {  in bcache_dev_sectors_dirty_add()
    616  int s = min_t(unsigned int, abs(nr_sectors),  in bcache_dev_sectors_dirty_add()
    619  if (nr_sectors < 0)  in bcache_dev_sectors_dirty_add()
    635  nr_sectors -= s;  in bcache_dev_sectors_dirty_add()
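The two bcache writeback entries above split a (possibly negative) dirty-sector delta across fixed-size stripes, clamping each step to what remains of the current stripe. A self-contained sketch of that loop, with illustrative names and a plain array standing in for bcache's per-stripe atomic counters:

    #include <stdint.h>
    #include <stdlib.h>

    /* Apply a signed dirty-sector delta starting at 'offset'. The caller must
     * size 'stripe_dirty' to cover every stripe the range can touch. */
    static void add_dirty(int64_t *stripe_dirty, uint64_t stripe_size,
                          uint64_t offset, int64_t nr_sectors)
    {
        uint64_t stripe = offset / stripe_size;
        uint64_t in_stripe = offset % stripe_size;

        while (nr_sectors) {
            int64_t s = llabs(nr_sectors);

            if (s > (int64_t)(stripe_size - in_stripe))
                s = stripe_size - in_stripe;   /* stay inside this stripe */
            if (nr_sectors < 0)
                s = -s;                        /* removing dirty sectors */

            stripe_dirty[stripe] += s;         /* atomic add in the real code */

            nr_sectors -= s;
            in_stripe = 0;                     /* next stripes start at 0 */
            stripe++;
        }
    }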
|
/linux-6.1.9/drivers/block/null_blk/ |
D | null_blk.h |
    140  sector_t nr_sectors);
    142  sector_t sector, unsigned int nr_sectors);
    151  sector_t sector, sector_t nr_sectors);
    167  enum req_op op, sector_t sector, sector_t nr_sectors)  in null_process_zoned_cmd() argument
|
D | zoned.c |
    241  unsigned int nr_sectors = len >> SECTOR_SHIFT;  in null_zone_valid_read_len() local
    245  sector + nr_sectors <= zone->wp)  in null_zone_valid_read_len()
    370  unsigned int nr_sectors, bool append)  in null_zone_write() argument
    382  return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);  in null_zone_write()
    410  if (zone->wp + nr_sectors > zone->start + zone->capacity) {  in null_zone_write()
    437  ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);  in null_zone_write()
    441  zone->wp += nr_sectors;  in null_zone_write()
    657  sector_t sector, sector_t nr_sectors)  in null_process_zoned_cmd() argument
    665  return null_zone_write(cmd, sector, nr_sectors, false);  in null_process_zoned_cmd()
    667  return null_zone_write(cmd, sector, nr_sectors, true);  in null_process_zoned_cmd()
    [all …]
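In null_blk's zoned model, a regular write to a sequential zone has to land on the zone's write pointer and must not cross the zone's capacity; on success the write pointer advances by nr_sectors. A condensed sketch of that path (error handling, the append case, and the actual data copy are omitted; field names follow the hits):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    struct zone {
        sector_t start;      /* first sector of the zone */
        sector_t wp;         /* write pointer */
        sector_t capacity;   /* usable sectors, may be less than the zone size */
    };

    /* Accept or reject a write of nr_sectors at 'sector', then advance wp. */
    static bool zone_write(struct zone *zone, sector_t sector,
                           unsigned int nr_sectors)
    {
        if (sector != zone->wp)
            return false;    /* sequential zones only accept writes at the wp */
        if (zone->wp + nr_sectors > zone->start + zone->capacity)
            return false;    /* would write past the zone's capacity */

        /* ...copy the data here... */
        zone->wp += nr_sectors;
        return true;
    }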
|
D | main.c |
    1169  sector_t sector, sector_t nr_sectors)  in null_handle_discard() argument
    1172  size_t n = nr_sectors << SECTOR_SHIFT;  in null_handle_discard()
    1332  sector_t nr_sectors)  in null_handle_badblocks() argument
    1338  if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))  in null_handle_badblocks()
    1347  sector_t nr_sectors)  in null_handle_memory_backed() argument
    1353  return null_handle_discard(dev, sector, nr_sectors);  in null_handle_memory_backed()
    1417  sector_t sector, unsigned int nr_sectors)  in null_process_cmd() argument
    1423  ret = null_handle_badblocks(cmd, sector, nr_sectors);  in null_process_cmd()
    1429  return null_handle_memory_backed(cmd, op, sector, nr_sectors);  in null_process_cmd()
    1435  sector_t nr_sectors, enum req_op op)  in null_handle_cmd() argument
    [all …]
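Throughout these drivers, nr_sectors counts 512-byte block-layer sectors regardless of the device's logical block size, so converting a (sector, nr_sectors) pair to a byte range is just a pair of shifts by SECTOR_SHIFT (9), as null_handle_discard() does at line 1172. A trivial sketch of that conversion (helper name is illustrative):

    #include <stdint.h>

    #define SECTOR_SHIFT 9   /* block-layer sectors are always 512 bytes */

    /* Map a sector range to the byte offset and length it covers. */
    static void sector_range_to_bytes(uint64_t sector, uint64_t nr_sectors,
                                      uint64_t *offset, uint64_t *len)
    {
        *offset = sector << SECTOR_SHIFT;
        *len    = nr_sectors << SECTOR_SHIFT;
    }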
|
/linux-6.1.9/block/partitions/ |
D | ibm.c |
    201  sector_t nr_sectors,  in find_lnx1_partitions() argument
    223  size = nr_sectors;  in find_lnx1_partitions()
    297  sector_t nr_sectors;  in ibm_partition() local
    312  nr_sectors = bdev_nr_sectors(bdev);  in ibm_partition()
    313  if (nr_sectors == 0)  in ibm_partition()
    340  label, labelsect, nr_sectors,  in ibm_partition()
    357  size = nr_sectors;  in ibm_partition()
|
/linux-6.1.9/fs/zonefs/ |
D | trace.h |
    30  __field(sector_t, nr_sectors)
    37  __entry->nr_sectors =
    43  __entry->nr_sectors
|
/linux-6.1.9/drivers/md/ |
D | dm-zone.c |
    360  unsigned int nr_sectors;  member
    426  unsigned int nr_sectors)  in dm_zone_map_bio_end() argument
    445  WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors);  in dm_zone_map_bio_end()
    452  if (nr_sectors != orig_bio_details->nr_sectors) {  in dm_zone_map_bio_end()
    456  WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors);  in dm_zone_map_bio_end()
    535  orig_bio_details.nr_sectors = bio_sectors(clone);  in dm_zone_map_bio()
|
D | md.h |
    577  static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)  in md_sync_acct() argument
    579  atomic_add(nr_sectors, &bdev->bd_disk->sync_io);  in md_sync_acct()
    582  static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)  in md_sync_acct_bio() argument
    584  md_sync_acct(bio->bi_bdev, nr_sectors);  in md_sync_acct_bio()
|
D | dm-log-writes.c |
     97  __le64 nr_sectors;  member
    126  sector_t nr_sectors;  member
    326  entry.nr_sectors = cpu_to_le64(block->nr_sectors);  in log_one_block()
    450  lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors);  in log_writes_kthread()
    703  block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));  in log_writes_map()
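dm-log-writes stores nr_sectors little-endian on disk (the cpu_to_le64() at line 326), so the log stays readable regardless of the host's byte order. A portable userspace equivalent of that store, shown only to illustrate the on-disk convention:

    #include <stdint.h>

    /* Write a 64-bit value to 'p' in little-endian byte order. */
    static void put_le64(uint8_t *p, uint64_t v)
    {
        for (int i = 0; i < 8; i++)
            p[i] = (uint8_t)(v >> (8 * i));
    }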
|
D | dm-zoned-target.c |
    631  unsigned int nr_sectors = bio_sectors(bio);  in dmz_map() local
    640  bio_op(bio), (unsigned long long)sector, nr_sectors,  in dmz_map()
    645  if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)  in dmz_map()
    649  if ((nr_sectors & DMZ_BLOCK_SECTORS_MASK) || (sector & DMZ_BLOCK_SECTORS_MASK))  in dmz_map()
    659  if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {  in dmz_map()
    669  if (chunk_sector + nr_sectors > dmz_zone_nr_sectors(zmd))  in dmz_map()
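dmz_map() only accepts BIOs whose position and length are both aligned to dm-zoned's internal block size. A small sketch of that mask test; the value of DMZ_BLOCK_SECTORS here (8 sectors, i.e. 4 KiB blocks) is an assumption, not taken from the excerpt:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    #define DMZ_BLOCK_SECTORS       8ULL   /* assumed: 4 KiB / 512-byte sectors */
    #define DMZ_BLOCK_SECTORS_MASK  (DMZ_BLOCK_SECTORS - 1)

    /* A BIO is acceptable only if both its start and length are block-aligned. */
    static bool dmz_bio_aligned(sector_t sector, unsigned int nr_sectors)
    {
        return !(nr_sectors & DMZ_BLOCK_SECTORS_MASK) &&
               !(sector & DMZ_BLOCK_SECTORS_MASK);
    }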
|
/linux-6.1.9/drivers/block/xen-blkback/ |
D | common.h |
     93  uint64_t nr_sectors;  member
    147  uint64_t nr_sectors;  member
    422  dst->u.discard.nr_sectors = src->u.discard.nr_sectors;  in blkif_get_x86_32_req()
    470  dst->u.discard.nr_sectors = src->u.discard.nr_sectors;  in blkif_get_x86_64_req()
|
/linux-6.1.9/Documentation/block/ |
D | request.rst |
    67  unsigned long nr_sectors  DBI  Total number of sectors in request
    69  unsigned long hard_nr_sectors  B  Used to keep nr_sectors sane
|
/linux-6.1.9/include/uapi/linux/ |
D | blkzoned.h | 145 __u64 nr_sectors; member
|
D | ublk_cmd.h | 152 __u32 nr_sectors; member
|
/linux-6.1.9/drivers/block/drbd/ |
D | drbd_actlog.c |
    852  sector_t esector, nr_sectors;  in __drbd_change_sync() local
    868  nr_sectors = get_capacity(device->vdisk);  in __drbd_change_sync()
    871  if (!expect(sector < nr_sectors))  in __drbd_change_sync()
    873  if (!expect(esector < nr_sectors))  in __drbd_change_sync()
    874  esector = nr_sectors - 1;  in __drbd_change_sync()
    876  lbnr = BM_SECT_TO_BIT(nr_sectors-1);  in __drbd_change_sync()
    883  if (unlikely(esector == (nr_sectors-1)))  in __drbd_change_sync()
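__drbd_change_sync() sanity-checks an inclusive sector interval [sector, esector] against the current device capacity: a start beyond the device is rejected, an end beyond it is clamped to the last sector. The clamping in isolation, with illustrative names and none of the bitmap handling:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    /* 'nr_sectors' is the device capacity, as in the hits above. */
    static bool clamp_sync_range(sector_t *sector, sector_t *esector,
                                 sector_t nr_sectors)
    {
        if (*sector >= nr_sectors)
            return false;              /* start is off the device: reject */
        if (*esector >= nr_sectors)
            *esector = nr_sectors - 1; /* clamp the end to the last sector */
        return true;
    }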
|
/linux-6.1.9/fs/btrfs/ |
D | zoned.c |
     358  sector_t nr_sectors;  in btrfs_get_dev_zone_info() local
     416  nr_sectors = bdev_nr_sectors(bdev);  in btrfs_get_dev_zone_info()
     418  zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors);  in btrfs_get_dev_zone_info()
     438  if (!IS_ALIGNED(nr_sectors, zone_sectors))  in btrfs_get_dev_zone_info()
     495  while (sector < nr_sectors) {  in btrfs_get_dev_zone_info()
     873  sector_t nr_sectors;  in btrfs_sb_log_location_bdev() local
     887  nr_sectors = bdev_nr_sectors(bdev);  in btrfs_sb_log_location_bdev()
     888  nr_zones = nr_sectors >> zone_sectors_shift;  in btrfs_sb_log_location_bdev()
    1004  sector_t nr_sectors;  in btrfs_reset_sb_log_zones() local
    1011  nr_sectors = bdev_nr_sectors(bdev);  in btrfs_reset_sb_log_zones()
    [all …]
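btrfs derives the zone count of a device from its size in sectors with a shift by ilog2(zone_sectors). The excerpt shows an IS_ALIGNED() test on nr_sectors but not what follows it; the sketch below assumes a device whose size is not a whole number of zones ends in one extra, shorter zone and rounds the count up, which is a common convention but not confirmed by the excerpt. The ilog2 helper is a userspace stand-in for the kernel's:

    #include <stdint.h>

    typedef uint64_t sector_t;

    static unsigned int ilog2_u64(uint64_t v)
    {
        unsigned int l = 0;

        while (v >>= 1)
            l++;
        return l;
    }

    /* Number of zones on a device, assuming zone_sectors is a power of two. */
    static uint32_t device_nr_zones(sector_t nr_sectors, sector_t zone_sectors)
    {
        uint32_t nr_zones = nr_sectors >> ilog2_u64(zone_sectors);

        /* assumption: count a trailing partial zone if the size is unaligned */
        if (nr_sectors & (zone_sectors - 1))
            nr_zones++;
        return nr_zones;
    }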
|
D | raid56.h | 63 u16 nr_sectors; member
|
D | raid56.c |
     147  for (i = 0; i < rbio->nr_sectors; i++) {  in cache_rbio_pages()
     210  for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {  in index_stripe_sectors()
     892  ASSERT(index >= 0 && index < rbio->nr_sectors);  in sector_in_rbio()
     949  rbio->nr_sectors = num_sectors;  in alloc_rbio()
    1266  for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;  in finish_rmw()
    1294  for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;  in finish_rmw()
    1454  for (i = 0; i < rbio->nr_sectors; i++) {  in find_stripe_sector()
    2124  for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;  in __raid56_parity_recover()
    2342  for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;  in alloc_rbio_essential_pages()
    2635  for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;  in raid56_parity_scrub_stripe()
|
/linux-6.1.9/include/xen/interface/io/ |
D | blkif.h | 214 uint64_t nr_sectors; member
|
/linux-6.1.9/fs/fat/ |
D | inode.c |
      68  unsigned nr_sectors;  member
      75  .nr_sectors = 160 * KB_IN_SECTORS,
      82  .nr_sectors = 180 * KB_IN_SECTORS,
      89  .nr_sectors = 320 * KB_IN_SECTORS,
      96  .nr_sectors = 360 * KB_IN_SECTORS,
    1568  if (floppy_defaults[i].nr_sectors == bd_sects) {  in fat_read_static_bpb()
    1592  bpb->fat_sectors = fdefaults->nr_sectors;  in fat_read_static_bpb()
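fat_read_static_bpb() recognizes old floppy formats by matching the block device's sector count against a table of default geometries. A trimmed sketch of that lookup, keeping only the nr_sectors field; KB_IN_SECTORS is 1024 / 512 = 2, so 160 KB is 320 sectors, and the geometry/BPB fields are elided:

    #include <stddef.h>
    #include <stdint.h>

    #define KB_IN_SECTORS 2u

    struct fat_floppy_defaults {
        unsigned nr_sectors;   /* total size of the medium in 512-byte sectors */
        /* ...geometry and BPB defaults elided... */
    };

    static const struct fat_floppy_defaults floppy_defaults[] = {
        { .nr_sectors = 160 * KB_IN_SECTORS },
        { .nr_sectors = 180 * KB_IN_SECTORS },
        { .nr_sectors = 320 * KB_IN_SECTORS },
        { .nr_sectors = 360 * KB_IN_SECTORS },
    };

    /* Return the defaults entry matching the device size, or NULL. */
    static const struct fat_floppy_defaults *find_defaults(uint64_t bd_sects)
    {
        for (size_t i = 0;
             i < sizeof(floppy_defaults) / sizeof(floppy_defaults[0]); i++)
            if (floppy_defaults[i].nr_sectors == bd_sects)
                return &floppy_defaults[i];
        return NULL;
    }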
|
/linux-6.1.9/drivers/block/ |
D | floppy.c |
    2259  unsigned int nr_sectors = current_count_sectors;  in floppy_end_request() local
    2264  nr_sectors = blk_rq_cur_sectors(req);  in floppy_end_request()
    2265  if (blk_update_request(req, error, nr_sectors << 9))  in floppy_end_request()
    2321  int nr_sectors;  in rw_interrupt() local
    2345  nr_sectors = (((reply_buffer[R_TRACK] - raw_cmd->cmd[TRACK]) * heads +  in rw_interrupt()
    2349  if (nr_sectors / ssize >  in rw_interrupt()
    2352  nr_sectors, current_count_sectors);  in rw_interrupt()
    2365  nr_sectors -= in_sector_offset;  in rw_interrupt()
    2366  INFBOUND(nr_sectors, 0);  in rw_interrupt()
    2367  SUPBOUND(current_count_sectors, nr_sectors);  in rw_interrupt()
    [all …]
|