
Searched refs: block_start (Results 1 – 25 of 45) sorted by relevance


/linux-6.1.9/fs/btrfs/tests/
inode-tests.c
265 if (em->block_start != EXTENT_MAP_HOLE) { in test_btrfs_get_extent()
266 test_err("expected a hole, got %llu", em->block_start); in test_btrfs_get_extent()
284 if (em->block_start != EXTENT_MAP_HOLE) { in test_btrfs_get_extent()
285 test_err("expected a hole, got %llu", em->block_start); in test_btrfs_get_extent()
306 if (em->block_start != EXTENT_MAP_INLINE) { in test_btrfs_get_extent()
307 test_err("expected an inline, got %llu", em->block_start); in test_btrfs_get_extent()
334 if (em->block_start != EXTENT_MAP_HOLE) { in test_btrfs_get_extent()
335 test_err("expected a hole, got %llu", em->block_start); in test_btrfs_get_extent()
357 if (em->block_start >= EXTENT_MAP_LAST_BYTE) { in test_btrfs_get_extent()
358 test_err("expected a real extent, got %llu", em->block_start); in test_btrfs_get_extent()
[all …]
extent-map-tests.c
28 em->start, em->len, em->block_start, in free_extent_map_tree()
72 em->block_start = 0; in test_case_1()
93 em->block_start = SZ_32K; /* avoid merging */ in test_case_1()
114 em->block_start = start; in test_case_1()
125 em->block_start != 0 || em->block_len != SZ_16K)) { in test_case_1()
129 em->block_start, em->block_len); in test_case_1()
160 em->block_start = EXTENT_MAP_INLINE; in test_case_2()
181 em->block_start = SZ_4K; in test_case_2()
202 em->block_start = EXTENT_MAP_INLINE; in test_case_2()
213 em->block_start != EXTENT_MAP_INLINE || em->block_len != (u64)-1)) { in test_case_2()
[all …]
/linux-6.1.9/fs/isofs/
compress.c
40 static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start, in zisofs_uncompress_block() argument
49 int i, block_size = block_end - block_start; in zisofs_uncompress_block()
54 int needblocks = (block_size + (block_start & bufmask) + bufmask) in zisofs_uncompress_block()
77 blocknum = block_start >> bufshift; in zisofs_uncompress_block()
138 (block_start & bufmask); in zisofs_uncompress_block()
140 (block_start & bufmask), in zisofs_uncompress_block()
143 block_start = 0; in zisofs_uncompress_block()
210 loff_t block_start, block_end; in zisofs_fill_pages() local
246 block_start = le32_to_cpu(*(__le32 *) in zisofs_fill_pages()
262 if (block_start > block_end) { in zisofs_fill_pages()
[all …]
/linux-6.1.9/fs/btrfs/
extent_map.c
218 ASSERT(next->block_start != EXTENT_MAP_DELALLOC && in mergable_maps()
219 prev->block_start != EXTENT_MAP_DELALLOC); in mergable_maps()
228 ((next->block_start == EXTENT_MAP_HOLE && in mergable_maps()
229 prev->block_start == EXTENT_MAP_HOLE) || in mergable_maps()
230 (next->block_start == EXTENT_MAP_INLINE && in mergable_maps()
231 prev->block_start == EXTENT_MAP_INLINE) || in mergable_maps()
232 (next->block_start < EXTENT_MAP_LAST_BYTE - 1 && in mergable_maps()
233 next->block_start == extent_map_block_end(prev)))) { in mergable_maps()
264 em->block_start = merge->block_start; in try_merge_map()
580 if (em->block_start < EXTENT_MAP_LAST_BYTE && in merge_extent_mapping()
[all …]
extent_map.h
43 u64 block_start; member
82 if (em->block_start + em->block_len < em->block_start) in extent_map_block_end()
84 return em->block_start + em->block_len; in extent_map_block_end()
inode.c
123 u64 len, u64 orig_start, u64 block_start,
1119 if (em->block_start >= EXTENT_MAP_LAST_BYTE) { in get_extent_allocation_hint()
1122 if (em && em->block_start < EXTENT_MAP_LAST_BYTE) in get_extent_allocation_hint()
1123 alloc_hint = em->block_start; in get_extent_allocation_hint()
1127 alloc_hint = em->block_start; in get_extent_allocation_hint()
2564 ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE); in split_zoned_em()
2576 split_pre->block_start = em->block_start; in split_zoned_em()
2597 split_mid->block_start = em->block_start + pre; in split_zoned_em()
2611 split_post->block_start = em->block_start + em->len - post; in split_zoned_em()
2810 if (em->block_start != EXTENT_MAP_HOLE) in btrfs_find_new_delalloc_bytes()
[all …]
/linux-6.1.9/fs/reiserfs/
file.c
173 unsigned block_start, block_end; in reiserfs_commit_page() local
195 for (bh = head = page_buffers(page), block_start = 0; in reiserfs_commit_page()
196 bh != head || !block_start; in reiserfs_commit_page()
197 block_start = block_end, bh = bh->b_this_page) { in reiserfs_commit_page()
201 block_end = block_start + blocksize; in reiserfs_commit_page()
202 if (block_end <= from || block_start >= to) { in reiserfs_commit_page()
/linux-6.1.9/kernel/sched/
stats.c
50 u64 sleep_start, block_start; in __update_stats_enqueue_sleeper() local
53 block_start = schedstat_val(stats->block_start); in __update_stats_enqueue_sleeper()
73 if (block_start) { in __update_stats_enqueue_sleeper()
74 u64 delta = rq_clock(rq) - block_start; in __update_stats_enqueue_sleeper()
82 __schedstat_set(stats->block_start, 0); in __update_stats_enqueue_sleeper()
/linux-6.1.9/fs/cramfs/
inode.c
823 u32 block_ptr, block_start, block_len; in cramfs_read_folio() local
839 block_start = block_ptr << CRAMFS_BLK_DIRECT_PTR_SHIFT; in cramfs_read_folio()
848 cramfs_read(sb, block_start, 2); in cramfs_read_folio()
849 block_start += 2; in cramfs_read_folio()
859 block_start = OFFSET(inode) + maxblock * 4; in cramfs_read_folio()
861 block_start = *(u32 *) in cramfs_read_folio()
864 if (unlikely(block_start & CRAMFS_BLK_FLAG_DIRECT_PTR)) { in cramfs_read_folio()
866 u32 prev_start = block_start; in cramfs_read_folio()
867 block_start = prev_start & ~CRAMFS_BLK_FLAGS; in cramfs_read_folio()
868 block_start <<= CRAMFS_BLK_DIRECT_PTR_SHIFT; in cramfs_read_folio()
[all …]
/linux-6.1.9/drivers/fpga/
microchip-spi.c
100 u32 block_start, component_size; in mpf_ops_parse_header() local
133 block_start = get_unaligned_le32(buf + block_start_offset); in mpf_ops_parse_header()
137 bitstream_start = block_start; in mpf_ops_parse_header()
138 info->header_size = block_start; in mpf_ops_parse_header()
139 if (block_start > count) in mpf_ops_parse_header()
144 components_size_start = block_start; in mpf_ops_parse_header()
/linux-6.1.9/drivers/gpu/drm/
drm_buddy.c
347 u64 block_start; in alloc_range_bias() local
361 block_start = drm_buddy_block_offset(block); in alloc_range_bias()
362 block_end = block_start + drm_buddy_block_size(mm, block) - 1; in alloc_range_bias()
364 if (!overlaps(start, end, block_start, block_end)) in alloc_range_bias()
370 if (contains(start, end, block_start, block_end) && in alloc_range_bias()
494 u64 block_start; in __alloc_range() local
505 block_start = drm_buddy_block_offset(block); in __alloc_range()
506 block_end = block_start + drm_buddy_block_size(mm, block) - 1; in __alloc_range()
508 if (!overlaps(start, end, block_start, block_end)) in __alloc_range()
516 if (contains(start, end, block_start, block_end)) { in __alloc_range()
/linux-6.1.9/fs/ocfs2/
aops.c
422 unsigned block_start, block_end; in walk_page_buffers() local
427 for ( bh = head, block_start = 0; in walk_page_buffers()
428 ret == 0 && (bh != head || !block_start); in walk_page_buffers()
429 block_start = block_end, bh = next) in walk_page_buffers()
432 block_end = block_start + blocksize; in walk_page_buffers()
433 if (block_end <= from || block_start >= to) { in walk_page_buffers()
572 unsigned int block_start) in ocfs2_should_read_blk() argument
574 u64 offset = page_offset(page) + block_start; in ocfs2_should_read_blk()
598 unsigned int block_end, block_start; in ocfs2_map_page_blocks() local
605 for (bh = head, block_start = 0; bh != head || !block_start; in ocfs2_map_page_blocks()
[all …]
/linux-6.1.9/fs/
buffer.c
1863 unsigned int block_start, block_end; in page_zero_new_buffers() local
1871 block_start = 0; in page_zero_new_buffers()
1873 block_end = block_start + bh->b_size; in page_zero_new_buffers()
1876 if (block_end > from && block_start < to) { in page_zero_new_buffers()
1880 start = max(from, block_start); in page_zero_new_buffers()
1892 block_start = block_end; in page_zero_new_buffers()
1959 unsigned block_start, block_end; in __block_write_begin_int() local
1976 for(bh = head, block_start = 0; bh != head || !block_start; in __block_write_begin_int()
1977 block++, block_start=block_end, bh = bh->b_this_page) { in __block_write_begin_int()
1978 block_end = block_start + blocksize; in __block_write_begin_int()
[all …]
/linux-6.1.9/fs/nilfs2/
page.c
425 unsigned int block_start, block_end; in nilfs_page_count_clean_buffers() local
429 for (bh = head = page_buffers(page), block_start = 0; in nilfs_page_count_clean_buffers()
430 bh != head || !block_start; in nilfs_page_count_clean_buffers()
431 block_start = block_end, bh = bh->b_this_page) { in nilfs_page_count_clean_buffers()
432 block_end = block_start + bh->b_size; in nilfs_page_count_clean_buffers()
433 if (block_end > from && block_start < to && !buffer_dirty(bh)) in nilfs_page_count_clean_buffers()
/linux-6.1.9/fs/ext4/
move_extent.c
174 unsigned int blocksize, block_start, block_end; in mext_page_mkuptodate() local
188 for (bh = head, block_start = 0; bh != head || !block_start; in mext_page_mkuptodate()
189 block++, block_start = block_end, bh = bh->b_this_page) { in mext_page_mkuptodate()
190 block_end = block_start + blocksize; in mext_page_mkuptodate()
191 if (block_end <= from || block_start >= to) { in mext_page_mkuptodate()
205 zero_user(page, block_start, blocksize); in mext_page_mkuptodate()
page-io.c
438 unsigned block_start; in ext4_bio_write_page() local
474 block_start = bh_offset(bh); in ext4_bio_write_page()
475 if (block_start >= len) { in ext4_bio_write_page()
inode.c
986 unsigned block_start, block_end; in ext4_walk_page_buffers() local
991 for (bh = head, block_start = 0; in ext4_walk_page_buffers()
992 ret == 0 && (bh != head || !block_start); in ext4_walk_page_buffers()
993 block_start = block_end, bh = next) { in ext4_walk_page_buffers()
995 block_end = block_start + blocksize; in ext4_walk_page_buffers()
996 if (block_end <= from || block_start >= to) { in ext4_walk_page_buffers()
1065 unsigned block_start, block_end; in ext4_block_write_begin() local
1085 for (bh = head, block_start = 0; bh != head || !block_start; in ext4_block_write_begin()
1086 block++, block_start = block_end, bh = bh->b_this_page) { in ext4_block_write_begin()
1087 block_end = block_start + blocksize; in ext4_block_write_begin()
[all …]
/linux-6.1.9/lib/zlib_deflate/
deflate.c
539 s->block_start = 0L; in lm_init()
768 s->block_start -= (long) wsize; in fill_window()
832 zlib_tr_flush_block(s, (s->block_start >= 0L ? \
833 (char *)&s->window[(unsigned)s->block_start] : \
835 (ulg)((long)s->strstart - s->block_start), \
837 s->block_start = s->strstart; \
878 s->block_start >= (long)s->w_size, "slide too late"); in deflate_stored()
885 Assert(s->block_start >= 0L, "block gone"); in deflate_stored()
891 max_start = s->block_start + max_block_size; in deflate_stored()
901 if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) { in deflate_stored()
/linux-6.1.9/drivers/mtd/parsers/
afs.c
227 u32 block_start; in afs_parse_v2_partition() local
281 block_start = imginfo[20]; in afs_parse_v2_partition()
287 block_start, block_end); in afs_parse_v2_partition()
/linux-6.1.9/arch/arm/mm/
mmu.c
1197 phys_addr_t block_start, block_end, memblock_limit = 0; in adjust_lowmem_bounds() local
1215 for_each_mem_range(i, &block_start, &block_end) { in adjust_lowmem_bounds()
1216 if (!IS_ALIGNED(block_start, PMD_SIZE)) { in adjust_lowmem_bounds()
1219 len = round_up(block_start, PMD_SIZE) - block_start; in adjust_lowmem_bounds()
1220 memblock_mark_nomap(block_start, len); in adjust_lowmem_bounds()
1225 for_each_mem_range(i, &block_start, &block_end) { in adjust_lowmem_bounds()
1226 if (block_start < vmalloc_limit) { in adjust_lowmem_bounds()
1252 if (!IS_ALIGNED(block_start, PMD_SIZE)) in adjust_lowmem_bounds()
1253 memblock_limit = block_start; in adjust_lowmem_bounds()
/linux-6.1.9/fs/ntfs/
mft.c
464 unsigned int block_start, block_end, m_start, m_end, page_ofs; in ntfs_sync_mft_mirror() local
513 block_start = 0; in ntfs_sync_mft_mirror()
517 block_end = block_start + blocksize; in ntfs_sync_mft_mirror()
521 if (unlikely(block_start >= m_end)) in ntfs_sync_mft_mirror()
532 (block_start - m_start); in ntfs_sync_mft_mirror()
568 BUG_ON(!nr_bhs && (m_start != block_start)); in ntfs_sync_mft_mirror()
572 } while (block_start = block_end, (bh = bh->b_this_page) != head); in ntfs_sync_mft_mirror()
670 unsigned int block_start, block_end, m_start, m_end; in write_mft_record_nolock() local
693 block_start = 0; in write_mft_record_nolock()
697 block_end = block_start + blocksize; in write_mft_record_nolock()
[all …]
/linux-6.1.9/fs/iomap/
buffered-io.c
514 static int iomap_read_folio_sync(loff_t block_start, struct folio *folio, in iomap_read_folio_sync() argument
521 bio.bi_iter.bi_sector = iomap_sector(iomap, block_start); in iomap_read_folio_sync()
532 loff_t block_start = round_down(pos, block_size); in __iomap_write_begin() local
547 iomap_adjust_read_range(iter->inode, folio, &block_start, in __iomap_write_begin()
548 block_end - block_start, &poff, &plen); in __iomap_write_begin()
557 if (iomap_block_needs_zeroing(iter, block_start)) { in __iomap_write_begin()
567 status = iomap_read_folio_sync(block_start, folio, in __iomap_write_begin()
573 } while ((block_start += plen) < block_end); in __iomap_write_begin()
/linux-6.1.9/fs/jbd2/
journal.c
1769 unsigned long long phys_block, block_start, block_stop; /* physical */ in __jbd2_journal_erase() local
1787 block_start = ~0ULL; in __jbd2_journal_erase()
1795 if (block_start == ~0ULL) { in __jbd2_journal_erase()
1796 block_start = phys_block; in __jbd2_journal_erase()
1797 block_stop = block_start - 1; in __jbd2_journal_erase()
1822 byte_start = block_start * journal->j_blocksize; in __jbd2_journal_erase()
1824 byte_count = (block_stop - block_start + 1) * in __jbd2_journal_erase()
1844 err, block_start, block_stop); in __jbd2_journal_erase()
1849 block_start = ~0ULL; in __jbd2_journal_erase()
/linux-6.1.9/drivers/net/ethernet/mellanox/mlxsw/
core_acl_flex_keys.c
499 int block_start, int block_end) in mlxsw_afk_clear() argument
503 for (i = block_start; i <= block_end; i++) in mlxsw_afk_clear()
core_acl_flex_keys.h
220 int block_start, int block_end);
