Searched refs:sectorsize (Results 1 – 25 of 57) sorted by relevance

/linux-6.1.9/fs/btrfs/tests/
inode-tests.c
87 static void setup_file_extents(struct btrfs_root *root, u32 sectorsize) in setup_file_extents() argument
108 offset = sectorsize; in setup_file_extents()
117 insert_extent(root, offset, sectorsize - 1, sectorsize - 1, 0, in setup_file_extents()
118 disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot); in setup_file_extents()
120 disk_bytenr += sectorsize; in setup_file_extents()
121 offset += sectorsize - 1; in setup_file_extents()
127 insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr, in setup_file_extents()
128 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot); in setup_file_extents()
130 offset += sectorsize; in setup_file_extents()
131 insert_extent(root, offset, sectorsize, sectorsize, 0, 0, 0, in setup_file_extents()
[all …]
free-space-tests.c
90 static int test_bitmaps(struct btrfs_block_group *cache, u32 sectorsize) in test_bitmaps() argument
130 next_bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize); in test_bitmaps()
159 u32 sectorsize) in test_bitmaps_and_extents() argument
161 u64 bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize); in test_bitmaps_and_extents()
396 u32 sectorsize) in test_steal_space_from_bitmap_to_extent() argument
533 ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, sectorsize); in test_steal_space_from_bitmap_to_extent()
591 if (cache->free_space_ctl->free_space != (SZ_1M + sectorsize)) { in test_steal_space_from_bitmap_to_extent()
592 test_err("cache free space is not 1Mb + %u", sectorsize); in test_steal_space_from_bitmap_to_extent()
614 if (cache->free_space_ctl->free_space != sectorsize) { in test_steal_space_from_bitmap_to_extent()
615 test_err("cache free space is not %u", sectorsize); in test_steal_space_from_bitmap_to_extent()
[all …]
btrfs-tests.c
117 struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize) in btrfs_alloc_dummy_fs_info() argument
143 fs_info->sectorsize = sectorsize; in btrfs_alloc_dummy_fs_info()
144 fs_info->sectorsize_bits = ilog2(sectorsize); in btrfs_alloc_dummy_fs_info()
230 cache->full_stripe_len = fs_info->sectorsize; in btrfs_alloc_dummy_block_group()
263 u32 sectorsize, nodesize; in btrfs_run_sanity_tests() local
271 sectorsize = test_sectorsize[i]; in btrfs_run_sanity_tests()
272 for (nodesize = sectorsize; in btrfs_run_sanity_tests()
276 sectorsize, nodesize); in btrfs_run_sanity_tests()
277 ret = btrfs_test_free_space_cache(sectorsize, nodesize); in btrfs_run_sanity_tests()
280 ret = btrfs_test_extent_buffer_operations(sectorsize, in btrfs_run_sanity_tests()
[all …]
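
The btrfs-tests.c matches above show the sanity-test driver pairing every candidate sectorsize with a range of nodesizes. A minimal standalone sketch of that iteration (the candidate sizes, the 64K cap, and run_one_test() are assumptions, not the kernel's names):

#include <stdint.h>
#include <stdio.h>

#define MAX_METADATA_BLOCKSIZE (64 * 1024)  /* assumed stand-in for BTRFS_MAX_METADATA_BLOCKSIZE */

static int run_one_test(uint32_t sectorsize, uint32_t nodesize)
{
    /* a real test would exercise a dummy fs_info; here we only report the pair */
    printf("testing sectorsize=%u nodesize=%u\n", sectorsize, nodesize);
    return 0;
}

int main(void)
{
    /* hypothetical candidates; the kernel builds its list around PAGE_SIZE */
    const uint32_t test_sectorsize[] = { 4096, 16384, 65536 };

    for (unsigned int i = 0; i < sizeof(test_sectorsize) / sizeof(test_sectorsize[0]); i++) {
        uint32_t sectorsize = test_sectorsize[i];

        /* nodesize starts at sectorsize and doubles up to the metadata cap */
        for (uint32_t nodesize = sectorsize; nodesize <= MAX_METADATA_BLOCKSIZE; nodesize <<= 1) {
            if (run_one_test(sectorsize, nodesize))
                return 1;
        }
    }
    return 0;
}
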
btrfs-tests.h
33 int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize);
34 int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize);
35 int btrfs_test_extent_io(u32 sectorsize, u32 nodesize);
36 int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
37 int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
38 int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
41 struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize);
extent-io-tests.c
108 static int test_find_delalloc(u32 sectorsize) in test_find_delalloc() argument
162 set_extent_delalloc(tmp, 0, sectorsize - 1, 0, NULL); in test_find_delalloc()
171 if (start != 0 || end != (sectorsize - 1)) { in test_find_delalloc()
173 sectorsize - 1, start, end); in test_find_delalloc()
193 set_extent_delalloc(tmp, sectorsize, max_bytes - 1, 0, NULL); in test_find_delalloc()
221 test_start = max_bytes + sectorsize; in test_find_delalloc()
430 static int test_eb_bitmaps(u32 sectorsize, u32 nodesize) in test_eb_bitmaps() argument
439 fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize); in test_eb_bitmaps()
469 eb = __alloc_dummy_extent_buffer(fs_info, sectorsize, nodesize); in test_eb_bitmaps()
595 int btrfs_test_extent_io(u32 sectorsize, u32 nodesize) in btrfs_test_extent_io() argument
[all …]
free-space-tree-tests.c
70 offset += fs_info->sectorsize; in __check_free_space_extents()
423 static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize, in run_test() argument
433 fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize); in run_test()
524 static int run_test_both_formats(test_func_t test_func, u32 sectorsize, in run_test_both_formats() argument
530 ret = run_test(test_func, 0, sectorsize, nodesize, alignment); in run_test_both_formats()
534 test_func, sectorsize, nodesize, alignment); in run_test_both_formats()
538 ret = run_test(test_func, 1, sectorsize, nodesize, alignment); in run_test_both_formats()
542 test_func, sectorsize, nodesize, alignment); in run_test_both_formats()
549 int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize) in btrfs_test_free_space_tree() argument
576 ret = run_test_both_formats(tests[i], sectorsize, nodesize, in btrfs_test_free_space_tree()
[all …]
extent-buffer-tests.c
12 static int test_btrfs_split_item(u32 sectorsize, u32 nodesize) in test_btrfs_split_item() argument
30 fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize); in test_btrfs_split_item()
214 int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize) in btrfs_test_extent_buffer_operations() argument
217 return test_btrfs_split_item(sectorsize, nodesize); in btrfs_test_extent_buffer_operations()
qgroup-tests.c
204 u32 sectorsize, u32 nodesize) in test_no_shared_qgroup() argument
303 u32 sectorsize, u32 nodesize) in test_multiple_refs() argument
439 int btrfs_test_qgroups(u32 sectorsize, u32 nodesize) in btrfs_test_qgroups() argument
446 fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize); in btrfs_test_qgroups()
519 ret = test_no_shared_qgroup(root, sectorsize, nodesize); in btrfs_test_qgroups()
522 ret = test_multiple_refs(root, sectorsize, nodesize); in btrfs_test_qgroups()
/linux-6.1.9/arch/um/drivers/
cow_user.c
29 __s32 sectorsize; member
48 __s32 sectorsize; member
91 __u32 sectorsize; member
103 __u32 sectorsize; member
125 void cow_sizes(int version, __u64 size, int sectorsize, int align, in cow_sizes() argument
130 *bitmap_len_out = (size + sectorsize - 1) / (8 * sectorsize); in cow_sizes()
133 *data_offset_out = (*data_offset_out + sectorsize - 1) / in cow_sizes()
134 sectorsize; in cow_sizes()
135 *data_offset_out *= sectorsize; in cow_sizes()
138 *bitmap_len_out = DIV_ROUND(size, sectorsize); in cow_sizes()
[all …]
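
The cow_sizes() lines above derive the COW bitmap length and data offset from the sectorsize. A standalone sketch of the same arithmetic (cow_sizes_sketch(), the bitmap_offset meaning, and the sample values are assumptions; the real function also branches on the header version):

#include <stdint.h>
#include <stdio.h>

/* bitmap_offset is where the bitmap begins in the COW file (assumed to be the
 * end of the header); one bitmap bit tracks one sectorsize-sized block. */
static void cow_sizes_sketch(uint64_t size, int sectorsize, int bitmap_offset,
                             uint64_t *bitmap_len_out, uint64_t *data_offset_out)
{
    /* bytes of bitmap needed: roughly one bit per sector of the backing file */
    *bitmap_len_out = (size + sectorsize - 1) / (8ULL * sectorsize);

    /* data area starts at the next sectorsize-aligned offset after the bitmap */
    *data_offset_out = (uint64_t)bitmap_offset + *bitmap_len_out;
    *data_offset_out = (*data_offset_out + sectorsize - 1) / sectorsize;
    *data_offset_out *= sectorsize;
}

int main(void)
{
    uint64_t bitmap_len, data_offset;

    /* 1 GiB backing file, 512-byte sectors, bitmap starting at offset 4096 */
    cow_sizes_sketch(1ULL << 30, 512, 4096, &bitmap_len, &data_offset);
    printf("bitmap: %llu bytes, data starts at offset %llu\n",
           (unsigned long long)bitmap_len, (unsigned long long)data_offset);
    return 0;
}
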
cow.h
8 int sectorsize, int alignment, int *bitmap_offset_out,
19 int sectorsize, int alignment,
22 extern void cow_sizes(int version, __u64 size, int sectorsize, int align,
ubd_kern.c
64 int sectorsize; member
633 int fd, err, sectorsize, asked_switch, mode = 0644; in open_ubd_file() local
664 &size, &sectorsize, &align, bitmap_offset_out); in open_ubd_file()
682 sectorsize, align, &size); in open_ubd_file()
694 cow_sizes(version, size, sectorsize, align, *bitmap_offset_out, in open_ubd_file()
705 int sectorsize, int alignment, int *bitmap_offset_out, in create_cow_file() argument
719 err = init_cow_file(fd, cow_file, backing_file, sectorsize, alignment, in create_cow_file()
1309 io_req->sectorsize = SECTOR_SIZE; in ubd_alloc_req()
1489 nsectors = desc->length / req->sectorsize; in do_io()
1499 start * req->sectorsize; in do_io()
[all …]
/linux-6.1.9/fs/btrfs/
lzo.c
133 const u32 sectorsize) in copy_compressed_data_to_page() argument
147 ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize); in copy_compressed_data_to_page()
167 u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize, in copy_compressed_data_to_page()
195 sector_bytes_left = round_up(*cur_out, sectorsize) - *cur_out; in copy_compressed_data_to_page()
214 const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize; in lzo_compress_pages() local
237 const u32 sectorsize_mask = sectorsize - 1; in lzo_compress_pages()
249 in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off); in lzo_compress_pages()
265 &cur_out, sectorsize); in lzo_compress_pages()
275 if (cur_in - start > sectorsize * 2 && cur_in - start < cur_out) { in lzo_compress_pages()
331 const u32 sectorsize = fs_info->sectorsize; in lzo_decompress_bio() local
[all …]
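
The lzo.c matches above keep each 4-byte segment header inside a single sector and clamp copies at sector boundaries. A simplified standalone sketch of that bookkeeping (helper names and the sample offsets are assumptions):

#include <stdint.h>
#include <stdio.h>

#define LZO_LEN 4   /* segment header size, as in the btrfs lzo format */

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }
static uint32_t round_up_u32(uint32_t x, uint32_t align) { return (x + align - 1) / align * align; }

/* would an LZO_LEN-byte header starting at cur_out straddle a sector boundary? */
static int header_crosses_sector(uint32_t cur_out, uint32_t sectorsize)
{
    return (cur_out / sectorsize) != ((cur_out + LZO_LEN - 1) / sectorsize);
}

int main(void)
{
    const uint32_t sectorsize = 4096;
    uint32_t cur_out = 4094;    /* only 2 bytes left in the current sector */
    uint32_t remaining = 1000;  /* compressed bytes of the next segment */

    /* the 4-byte segment header must live entirely inside one sector */
    if (header_crosses_sector(cur_out, sectorsize))
        cur_out = round_up_u32(cur_out, sectorsize);

    cur_out += LZO_LEN;         /* header written, payload follows */

    while (remaining) {
        /* never copy past the end of the current sector in one step */
        uint32_t copy_len = min_u32(sectorsize - cur_out % sectorsize, remaining);

        cur_out += copy_len;
        remaining -= copy_len;
        printf("copied %u bytes, now at offset %u\n", copy_len, cur_out);
    }
    return 0;
}
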
delalloc-space.c
121 bytes = ALIGN(bytes, fs_info->sectorsize); in btrfs_alloc_data_chunk_ondemand()
138 len = round_up(start + len, fs_info->sectorsize) - in btrfs_check_data_free_space()
139 round_down(start, fs_info->sectorsize); in btrfs_check_data_free_space()
140 start = round_down(start, fs_info->sectorsize); in btrfs_check_data_free_space()
176 ASSERT(IS_ALIGNED(len, fs_info->sectorsize)); in btrfs_free_reserved_data_space_noquota()
195 len = round_up(start + len, fs_info->sectorsize) - in btrfs_free_reserved_data_space()
196 round_down(start, fs_info->sectorsize); in btrfs_free_reserved_data_space()
197 start = round_down(start, fs_info->sectorsize); in btrfs_free_reserved_data_space()
328 num_bytes = ALIGN(num_bytes, fs_info->sectorsize); in btrfs_delalloc_reserve_metadata()
329 disk_num_bytes = ALIGN(disk_num_bytes, fs_info->sectorsize); in btrfs_delalloc_reserve_metadata()
[all …]
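
The delalloc-space.c matches above widen a byte range to whole sectors before reserving or freeing space: the start is rounded down and the end rounded up to the sectorsize. A minimal sketch of that alignment (the helpers here are placeholders, not the kernel macros):

#include <stdint.h>
#include <stdio.h>

static uint64_t round_down_u64(uint64_t x, uint64_t align) { return x - (x % align); }
static uint64_t round_up_u64(uint64_t x, uint64_t align) { return round_down_u64(x + align - 1, align); }

int main(void)
{
    const uint64_t sectorsize = 4096;
    uint64_t start = 5000, len = 100;

    /* same pattern as the matches: end rounded up, start rounded down */
    uint64_t aligned_len = round_up_u64(start + len, sectorsize) -
                           round_down_u64(start, sectorsize);
    uint64_t aligned_start = round_down_u64(start, sectorsize);

    /* reserving space for [5000, 5100) actually covers [4096, 8192) */
    printf("aligned range: [%llu, %llu)\n",
           (unsigned long long)aligned_start,
           (unsigned long long)(aligned_start + aligned_len));
    return 0;
}
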
raid56.c
157 rbio->bioc->fs_info->sectorsize); in cache_rbio_pages()
184 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in full_page_sectors_uptodate() local
185 const u32 sectors_per_page = PAGE_SIZE / sectorsize; in full_page_sectors_uptodate()
206 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in index_stripe_sectors() local
210 for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) { in index_stripe_sectors()
222 const u32 sectorsize = src->bioc->fs_info->sectorsize; in steal_rbio_page() local
223 const u32 sectors_per_page = PAGE_SIZE / sectorsize; in steal_rbio_page()
925 ASSERT(IS_ALIGNED(PAGE_SIZE, fs_info->sectorsize)); in alloc_rbio()
1021 const u32 sectorsize = rbio->bioc->fs_info->sectorsize; in rbio_add_io_sector() local
1038 disk_start = stripe->physical + sector_nr * sectorsize; in rbio_add_io_sector()
[all …]
tree-checker.c
122 end = ALIGN(key->offset + len, leaf->fs_info->sectorsize); in file_extent_end()
204 u32 sectorsize = fs_info->sectorsize; in check_extent_data_item() local
208 if (unlikely(!IS_ALIGNED(key->offset, sectorsize))) { in check_extent_data_item()
211 key->offset, sectorsize); in check_extent_data_item()
297 if (unlikely(CHECK_FE_ALIGNED(leaf, slot, fi, ram_bytes, sectorsize) || in check_extent_data_item()
298 CHECK_FE_ALIGNED(leaf, slot, fi, disk_bytenr, sectorsize) || in check_extent_data_item()
299 CHECK_FE_ALIGNED(leaf, slot, fi, disk_num_bytes, sectorsize) || in check_extent_data_item()
300 CHECK_FE_ALIGNED(leaf, slot, fi, offset, sectorsize) || in check_extent_data_item()
301 CHECK_FE_ALIGNED(leaf, slot, fi, num_bytes, sectorsize))) in check_extent_data_item()
342 u32 sectorsize = fs_info->sectorsize; in check_csum_item() local
[all …]
file-item.c
87 ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize)); in btrfs_inode_set_file_extent_range()
115 ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize) || in btrfs_inode_clear_file_extent_range()
129 return ncsums * fs_info->sectorsize; in max_ordered_sum_bytes()
138 int num_sectors = (int)DIV_ROUND_UP(bytes, fs_info->sectorsize); in btrfs_ordered_sum_size()
271 const u32 sectorsize = fs_info->sectorsize; in search_csum_tree() local
278 ASSERT(IS_ALIGNED(disk_bytenr, sectorsize) && in search_csum_tree()
279 IS_ALIGNED(len, sectorsize)); in search_csum_tree()
289 csum_len = (itemsize / csum_size) * sectorsize; in search_csum_tree()
307 csum_len = (itemsize / csum_size) * sectorsize; in search_csum_tree()
381 const u32 sectorsize = fs_info->sectorsize; in btrfs_lookup_bio_sums() local
[all …]
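
The file-item.c matches above size checksums per sector: DIV_ROUND_UP(bytes, sectorsize) checksums cover an extent, and a csum item of itemsize bytes covers (itemsize / csum_size) * sectorsize bytes of data. A standalone sketch, assuming a 4-byte crc32c checksum and sample sizes:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    const uint32_t sectorsize = 4096;
    const uint32_t csum_size = 4;   /* crc32c; other checksum algorithms are larger */
    uint64_t bytes = 1 << 20;       /* size of the ordered extent */
    uint32_t itemsize = 256;        /* size of one csum item found in the tree */

    /* one checksum per sector of data */
    uint64_t num_sectors = DIV_ROUND_UP(bytes, sectorsize);
    uint64_t csum_bytes_needed = num_sectors * csum_size;

    /* how many data bytes a single on-disk csum item accounts for */
    uint64_t data_covered_by_item = (uint64_t)(itemsize / csum_size) * sectorsize;

    printf("%llu sectors need %llu csum bytes; one item covers %llu data bytes\n",
           (unsigned long long)num_sectors,
           (unsigned long long)csum_bytes_needed,
           (unsigned long long)data_covered_by_item);
    return 0;
}
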
file.c
301 cur = max(cur + fs_info->sectorsize, range.start); in __btrfs_run_defrag_inode()
418 u64 block_start = round_down(pos, fs_info->sectorsize); in btrfs_drop_pages()
419 u64 block_len = round_up(pos + copied, fs_info->sectorsize) - block_start; in btrfs_drop_pages()
463 start_pos = round_down(pos, fs_info->sectorsize); in btrfs_dirty_pages()
465 fs_info->sectorsize); in btrfs_dirty_pages()
778 fs_info->sectorsize); in btrfs_drop_extents()
1304 start_pos = round_down(pos, fs_info->sectorsize); in lock_and_cleanup_extent_if_need()
1305 last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1; in lock_and_cleanup_extent_if_need()
1391 lockstart = round_down(pos, fs_info->sectorsize); in btrfs_check_nocow_lock()
1393 fs_info->sectorsize) - 1; in btrfs_check_nocow_lock()
[all …]
scrub.c
329 sblock->len += sblock->sctx->fs_info->sectorsize; in alloc_scrub_sector()
868 fs_info->sectorsize, nlink, in scrub_print_warning_inode()
1466 sublen = min_t(u64, length, fs_info->sectorsize); in scrub_setup_recheck_block()
1594 bio_add_scrub_sector(bio, sector, fs_info->sectorsize); in scrub_recheck_block_on_raid56()
1645 bio_add_scrub_sector(&bio, sector, fs_info->sectorsize); in scrub_recheck_block()
1708 const u32 sectorsize = fs_info->sectorsize; in scrub_repair_sector_from_good_copy() local
1725 ret = bio_add_scrub_sector(&bio, sector_good, sectorsize); in scrub_repair_sector_from_good_copy()
1765 const u32 sectorsize = sblock->sctx->fs_info->sectorsize; in scrub_write_sector_to_dev_replace() local
1769 memset(scrub_sector_get_kaddr(sector), 0, sectorsize); in scrub_write_sector_to_dev_replace()
1807 const u32 sectorsize = sctx->fs_info->sectorsize; in scrub_add_sector_to_wr_bio() local
[all …]
subpage.c
68 if (fs_info->sectorsize >= PAGE_SIZE) in btrfs_is_subpage()
89 void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize) in btrfs_init_subpage_info() argument
94 ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize)); in btrfs_init_subpage_info()
96 nr_bits = PAGE_SIZE / sectorsize; in btrfs_init_subpage_info()
164 ASSERT(fs_info->sectorsize < PAGE_SIZE); in btrfs_alloc_subpage()
232 ASSERT(IS_ALIGNED(start, fs_info->sectorsize) && in btrfs_subpage_assert()
233 IS_ALIGNED(len, fs_info->sectorsize)); in btrfs_subpage_assert()
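
The subpage.c matches above enable per-sector tracking only when the sectorsize is smaller than the page size, using PAGE_SIZE / sectorsize bitmap bits per page and requiring every range to be sector-aligned. A standalone sketch of that condition (the 64K page size is an assumption, chosen so the subpage case is visible):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_SKETCH 65536u /* pretend we are on a 64K-page machine */

static int is_subpage(uint32_t sectorsize)
{
    /* subpage handling is only needed when one page holds several sectors */
    return sectorsize < PAGE_SIZE_SKETCH;
}

int main(void)
{
    uint32_t sectorsize = 4096;

    assert(PAGE_SIZE_SKETCH % sectorsize == 0);
    if (is_subpage(sectorsize)) {
        uint32_t bits_per_page = PAGE_SIZE_SKETCH / sectorsize;
        printf("subpage mode: %u bitmap bits per page\n", bits_per_page);
    } else {
        printf("sectorsize covers the whole page, no subpage tracking needed\n");
    }
    return 0;
}
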
extent_io.c
746 const u32 sectorsize = fs_info->sectorsize; in btrfs_get_io_failure_record() local
760 ASSERT(failrec->len == fs_info->sectorsize); in btrfs_get_io_failure_record()
770 failrec->len = sectorsize; in btrfs_get_io_failure_record()
779 failrec->num_copies = btrfs_num_copies(fs_info, failrec->logical, sectorsize); in btrfs_get_io_failure_record()
903 const u32 sectorsize = inode->root->fs_info->sectorsize; in end_sector_io() local
906 end_page_read(page, uptodate, offset, sectorsize); in end_sector_io()
909 offset + sectorsize - 1, &cached, GFP_ATOMIC); in end_sector_io()
910 unlock_extent_atomic(&inode->io_tree, offset, offset + sectorsize - 1, in end_sector_io()
924 const u32 sectorsize = fs_info->sectorsize; in submit_data_read_repair() local
944 const unsigned int offset = i * sectorsize; in submit_data_read_repair()
[all …]
disk-io.c
867 if (fs_info->sectorsize == PAGE_SIZE) { in btree_dirty_folio()
890 cur = page_start + cur_bit * fs_info->sectorsize; in btree_dirty_folio()
2623 u64 sectorsize = btrfs_super_sectorsize(sb); in btrfs_validate_super() local
2655 if (!is_power_of_2(sectorsize) || sectorsize < 4096 || in btrfs_validate_super()
2656 sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) { in btrfs_validate_super()
2657 btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize); in btrfs_validate_super()
2669 if (sectorsize > PAGE_SIZE || (sectorsize != SZ_4K && sectorsize != PAGE_SIZE)) { in btrfs_validate_super()
2672 sectorsize, PAGE_SIZE); in btrfs_validate_super()
2676 if (!is_power_of_2(nodesize) || nodesize < sectorsize || in btrfs_validate_super()
2688 if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) { in btrfs_validate_super()
[all …]
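
The disk-io.c matches above validate the superblock's sectorsize: a power of two, at least 4096, no larger than the maximum metadata block size, and either 4K or equal to the page size, with nodesize a power of two no smaller than sectorsize. A standalone sketch of those checks (the constants are stand-ins for the kernel's):

#include <stdint.h>
#include <stdio.h>

#define SZ_4K 4096u
#define MAX_METADATA_BLOCKSIZE 65536u   /* stand-in for BTRFS_MAX_METADATA_BLOCKSIZE */
#define PAGE_SIZE_SKETCH 4096u          /* stand-in for the runtime PAGE_SIZE */

static int is_power_of_2(uint64_t n)
{
    return n != 0 && (n & (n - 1)) == 0;
}

static int validate_sizes(uint64_t sectorsize, uint64_t nodesize)
{
    if (!is_power_of_2(sectorsize) || sectorsize < 4096 ||
        sectorsize > MAX_METADATA_BLOCKSIZE)
        return -1;
    /* only 4K, or a sectorsize matching the page size, is accepted */
    if (sectorsize > PAGE_SIZE_SKETCH ||
        (sectorsize != SZ_4K && sectorsize != PAGE_SIZE_SKETCH))
        return -1;
    if (!is_power_of_2(nodesize) || nodesize < sectorsize ||
        nodesize > MAX_METADATA_BLOCKSIZE)
        return -1;
    return 0;
}

int main(void)
{
    printf("4K/16K: %s\n", validate_sizes(4096, 16384) ? "rejected" : "ok");
    printf("512/16K: %s\n", validate_sizes(512, 16384) ? "rejected" : "ok");
    return 0;
}
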
free-space-tree.c
47 bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS; in set_free_space_tree_thresholds()
256 fs_info->sectorsize); in convert_free_space_to_bitmaps()
258 fs_info->sectorsize); in convert_free_space_to_bitmaps()
299 bitmap_range = fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS; in convert_free_space_to_bitmaps()
395 fs_info->sectorsize * in convert_free_space_to_extents()
438 key.objectid = start + start_bit * block_group->fs_info->sectorsize; in convert_free_space_to_extents()
440 key.offset = (end_bit - start_bit) * block_group->fs_info->sectorsize; in convert_free_space_to_extents()
527 block_group->fs_info->sectorsize); in free_space_test_bit()
616 u64 prev_block = start - block_group->fs_info->sectorsize; in modify_free_space_bitmap()
1472 offset += fs_info->sectorsize; in load_free_space_bitmaps()
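
The free-space-tree.c matches above treat each bitmap bit as one sector, so thresholds and keys are derived by multiplying bit counts by sectorsize. A standalone sketch of mapping a run of set bits back to an extent key (the struct and function names are hypothetical):

#include <stdint.h>
#include <stdio.h>

struct extent_key {
    uint64_t objectid;  /* extent start, in bytes */
    uint64_t offset;    /* extent length, in bytes */
};

static struct extent_key bit_run_to_extent(uint64_t block_group_start,
                                           uint32_t sectorsize,
                                           unsigned long start_bit,
                                           unsigned long end_bit)
{
    struct extent_key key;

    /* each bit is one sector, counted from the block group start */
    key.objectid = block_group_start + (uint64_t)start_bit * sectorsize;
    key.offset = (uint64_t)(end_bit - start_bit) * sectorsize;
    return key;
}

int main(void)
{
    /* a run of 16 set bits starting at bit 32 in a block group at 1 GiB */
    struct extent_key key = bit_run_to_extent(1ULL << 30, 4096, 32, 48);

    printf("free extent at %llu, length %llu\n",
           (unsigned long long)key.objectid,
           (unsigned long long)key.offset);
    return 0;
}
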
/linux-6.1.9/drivers/mtd/nand/raw/atmel/
pmecc.c
226 if (req->ecc.sectorsize == 512) { in atmel_pmecc_create_gf_tables()
260 if (req->ecc.sectorsize == 512) in atmel_pmecc_get_gf_tables()
289 if (req->ecc.sectorsize == ATMEL_PMECC_SECTOR_SIZE_AUTO) { in atmel_pmecc_prepare_user_req()
294 req->ecc.sectorsize = 1024; in atmel_pmecc_prepare_user_req()
296 req->ecc.sectorsize = 512; in atmel_pmecc_prepare_user_req()
299 if (req->ecc.sectorsize != 512 && req->ecc.sectorsize != 1024) in atmel_pmecc_prepare_user_req()
302 if (req->pagesize % req->ecc.sectorsize) in atmel_pmecc_prepare_user_req()
305 req->ecc.nsectors = req->pagesize / req->ecc.sectorsize; in atmel_pmecc_prepare_user_req()
316 nbytes = DIV_ROUND_UP(strength * fls(8 * req->ecc.sectorsize), in atmel_pmecc_prepare_user_req()
400 if (req->ecc.sectorsize == 1024) in atmel_pmecc_create_user()
[all …]
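
The pmecc.c matches above pick an ECC sector size of 512 or 1024 bytes, require it to divide the NAND page size, and size the per-sector ECC bytes as DIV_ROUND_UP(strength * fls(8 * sectorsize), 8). A standalone sketch; the auto-selection condition is not visible in the listing, so choosing 1024 whenever it divides the page is an assumption:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* 1-based index of the most significant set bit, 0 for x == 0 (like fls()) */
static int fls_u32(uint32_t x)
{
    int r = 0;

    while (x) {
        x >>= 1;
        r++;
    }
    return r;
}

int main(void)
{
    uint32_t pagesize = 4096;   /* NAND page size */
    uint32_t strength = 8;      /* correctable bits per ECC sector */
    uint32_t sectorsize;

    /* assumed auto-selection: prefer the larger ECC sector when it divides the page */
    sectorsize = (pagesize % 1024 == 0) ? 1024 : 512;

    if (pagesize % sectorsize) {
        fprintf(stderr, "page size must be a multiple of the ECC sector size\n");
        return 1;
    }

    uint32_t nsectors = pagesize / sectorsize;
    uint32_t ecc_bytes_per_sector = DIV_ROUND_UP(strength * fls_u32(8 * sectorsize), 8);

    printf("%u ECC sectors of %u bytes, %u ECC bytes each (%u total)\n",
           nsectors, sectorsize, ecc_bytes_per_sector,
           nsectors * ecc_bytes_per_sector);
    return 0;
}
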
pmecc.h
47 int sectorsize; member
/linux-6.1.9/drivers/md/
dm-log-writes.c
85 __le32 sectorsize; member
106 u32 sectorsize; member
239 lc->sectorsize - entrylen - datalen); in write_metadata()
242 ret = bio_add_page(bio, page, lc->sectorsize, 0); in write_metadata()
243 if (ret != lc->sectorsize) { in write_metadata()
281 pg_sectorlen = ALIGN(pg_datalen, lc->sectorsize); in write_inline_data()
407 super.sectorsize = cpu_to_le32(lc->sectorsize); in log_super()
549 lc->sectorsize = bdev_logical_block_size(lc->dev->bdev); in log_writes_ctr()
550 lc->sectorshift = ilog2(lc->sectorsize); in log_writes_ctr()
565 lc->next_sector = lc->sectorsize >> SECTOR_SHIFT; in log_writes_ctr()
[all …]
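
The dm-log-writes.c matches above cache the device's logical block size as sectorsize, precompute sectorshift = ilog2(sectorsize) so byte/sector conversions become shifts, and express the super block's footprint in 512-byte units via SECTOR_SHIFT. A standalone sketch (ilog2_u32() and the sample sizes are stand-ins):

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9  /* 512-byte units, as in the block layer */

/* floor(log2(n)); n must be non-zero */
static unsigned int ilog2_u32(uint32_t n)
{
    unsigned int r = 0;

    while (n >>= 1)
        r++;
    return r;
}

int main(void)
{
    uint32_t sectorsize = 4096;     /* stand-in for bdev_logical_block_size() */
    uint32_t sectorshift = ilog2_u32(sectorsize);

    /* first log entry goes right after the super block */
    uint64_t next_sector = sectorsize >> SECTOR_SHIFT;

    uint64_t bio_bytes = 128 * 1024;
    printf("sectorshift=%u, super occupies %llu 512-byte sectors, 128K bio = %llu device blocks\n",
           sectorshift, (unsigned long long)next_sector,
           (unsigned long long)(bio_bytes >> sectorshift));
    return 0;
}
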
