/linux-6.1.9/fs/netfs/
buffered_read.c
     54  pg_end = folio_pos(folio) + folio_size(folio) - 1;   in netfs_rreq_unlock_folios()
    230  folio_file_pos(folio), folio_size(folio),   in netfs_read_folio()
    276  size_t plen = folio_size(folio);   in netfs_skip_folio_read()
    381  folio_file_pos(folio), folio_size(folio),   in netfs_write_begin()
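The hit at buffered_read.c line 54 is the common idiom for the last byte a folio covers in the file: folio_pos() gives the first byte, folio_size() the length, so the inclusive end is their sum minus one. A minimal sketch of that idiom (the helper name folio_last_byte() is invented here, not a kernel API):

    #include <linux/pagemap.h>

    /* Inclusive file offset of the last byte backed by this folio. */
    static inline loff_t folio_last_byte(struct folio *folio)
    {
            return folio_pos(folio) + folio_size(folio) - 1;
    }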
/linux-6.1.9/fs/iomap/ |
buffered-io.c
     99  size_t plen = min_t(loff_t, folio_size(folio) - poff, length);   in iomap_adjust_read_range()
    333  .len = folio_size(folio),   in iomap_read_folio()
    447  count = min(folio_size(folio) - from, count);   in iomap_is_partially_uptodate()
    463  folio_size(folio));   in iomap_release_folio()
    487  if (offset == 0 && len == folio_size(folio)) {   in iomap_invalidate_folio()
    621  if (pos + len > folio_pos(folio) + folio_size(folio))   in iomap_write_begin()
    622  len = folio_pos(folio) + folio_size(folio) - pos;   in iomap_write_begin()
    916  if (bytes > folio_size(folio) - offset)   in iomap_zero_iter()
    917  bytes = folio_size(folio) - offset;   in iomap_zero_iter()
   1444  trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));   in iomap_do_writepage()
  [all …]
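Lines 621–622 in iomap_write_begin() show the standard clamp: a write that would run past the folio backing it is trimmed so it ends exactly at folio_pos() + folio_size(). A hedged sketch of that clamp with an invented helper name, and none of the locking or iomap state the real function carries:

    #include <linux/pagemap.h>

    /* Trim (pos, len) so the range never extends past the folio's end.
     * Assumes pos already lies within the folio. */
    static size_t clamp_len_to_folio(struct folio *folio, loff_t pos, size_t len)
    {
            loff_t folio_end = folio_pos(folio) + folio_size(folio);

            if (pos + len > folio_end)
                    len = folio_end - pos;
            return len;
    }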
/linux-6.1.9/fs/erofs/ |
fscache.c
     85  pgend = pgpos + folio_size(folio);   in erofs_fscache_rreq_unlock_folios()
    234  folio_pos(folio), folio_size(folio));   in erofs_fscache_meta_read_folio()
    336  DBG_BUGON(folio_size(folio) != EROFS_BLKSIZ);   in erofs_fscache_read_folio()
    339  folio_size(folio), &unlock);   in erofs_fscache_read_folio()
    372  size -= folio_size(folio);   in erofs_fscache_readahead()
super.c
    636  DBG_BUGON(stop > folio_size(folio) || stop < length);   in erofs_managed_cache_invalidate_folio()
    638  if (offset == 0 && stop == folio_size(folio))   in erofs_managed_cache_invalidate_folio()
/linux-6.1.9/mm/ |
truncate.c
    179  folio_invalidate(folio, 0, folio_size(folio));   in truncate_cleanup_folio()
    220  length = folio_size(folio);   in truncate_inode_partial_folio()
    227  if (length == folio_size(folio)) {   in truncate_inode_partial_folio()
    380  same_folio = lend < folio_pos(folio) + folio_size(folio);   in truncate_inode_pages_range()
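truncate_cleanup_folio() (line 179) invalidates every private byte of the folio by passing the full range (0, folio_size(folio)) to folio_invalidate(), which dispatches to the mapping's ->invalidate_folio handler. A minimal sketch of that caller-side pattern (the wrapper name is made up):

    #include <linux/pagemap.h>

    /* Tell the filesystem to drop all private state attached to the folio. */
    static void drop_folio_private(struct folio *folio)
    {
            if (folio_has_private(folio))
                    folio_invalidate(folio, 0, folio_size(folio));
    }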
usercopy.c
    198  if (n > folio_size(folio) - offset)   in check_heap_object()
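check_heap_object() treats a folio-backed allocation (a large kmalloc) as one contiguous object: copying n bytes starting at offset within the folio must not cross folio_size(). A sketch of just that bounds test (hypothetical helper; the real code also resolves the pointer to its folio and handles slab and vmalloc objects separately):

    #include <linux/mm.h>

    /*
     * True if an n-byte copy starting 'offset' bytes into the folio stays
     * within the folio.  Assumes offset was derived from the folio itself,
     * i.e. offset < folio_size(folio), so the subtraction cannot underflow.
     */
    static bool copy_within_folio(struct folio *folio, size_t offset, size_t n)
    {
            return n <= folio_size(folio) - offset;
    }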
secretmem.c
    156  folio_zero_segment(folio, 0, folio_size(folio));   in secretmem_free_folio()
slab_common.c
   1033  if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))   in __ksize()
   1037  return folio_size(folio);   in __ksize()
slob.c
    594  return folio_size(folio);   in __ksize()
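Both __ksize() implementations handle allocations that were too large for the kmalloc caches the same way: such an object is backed directly by a compound folio, so its usable size is simply folio_size() of that folio. A rough sketch of that lookup (the real functions also check for slab pages, ZERO_SIZE_PTR and kfence objects):

    #include <linux/mm.h>

    /* Usable size of a large kmalloc() object backed directly by a folio. */
    static size_t folio_backed_ksize(const void *object)
    {
            struct folio *folio = virt_to_folio(object);

            return folio_size(folio);
    }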
filemap.c
   2708  size_t fsize = folio_size(folio);   in filemap_read()
   2851  } while (offset < folio_size(folio));   in folio_seek_hole_data()
   2862  return folio_size(folio);   in seek_folio_size()
/linux-6.1.9/fs/afs/ |
write.c
    488  psize = folio_size(folio);   in afs_extend_writeback()
    591  (to == folio_size(folio) || new_content))   in afs_write_back_from_locked_folio()
    741  start += folio_size(folio);   in afs_writepages_region()
    756  start += folio_size(folio);   in afs_writepages_region()
    932  priv = afs_folio_dirty(folio, 0, folio_size(folio));   in afs_page_mkwrite()
    994  t = folio_size(folio);   in afs_launder_folio()
file.c
    346  fsreq->len = folio_size(folio);   in afs_symlink_read_folio()
    418  if (offset == 0 && length == folio_size(folio))   in afs_invalidate_dirty()
dir.c
    151  size = min_t(loff_t, folio_size(folio), i_size - pos);   in afs_dir_check_folio()
    206  size = min_t(loff_t, folio_size(folio), req->actual_len - folio_pos(folio));   in afs_dir_dump()
    532  size = min_t(loff_t, folio_size(folio),   in afs_dir_iterate()
   2036  if (offset == 0 && length == folio_size(folio))   in afs_dir_invalidate_folio()
/linux-6.1.9/include/linux/ |
bio.h
    286  fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);   in bio_first_folio()
    297  fi->length = min(folio_size(fi->folio), fi->_seg_count);   in bio_next_folio()
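bio_first_folio() and bio_next_folio() back the folio-granular bio iterator: each step's fi.length is clamped so it never exceeds what is left of the current folio or of the segment being walked. A hedged usage sketch via bio_for_each_folio_all(); the completion handler here is invented and skips the bookkeeping a real driver or filesystem would need:

    #include <linux/bio.h>
    #include <linux/pagemap.h>

    /* Walk a finished read bio one folio run at a time. */
    static void sketch_read_end_io(struct bio *bio)
    {
            struct folio_iter fi;

            bio_for_each_folio_all(fi, bio) {
                    if (!bio->bi_status)
                            folio_mark_uptodate(fi.folio);
                    folio_unlock(fi.folio);
            }
            bio_put(bio);
    }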
pagemap.h
   1406  return folio_size(folio);   in folio_mkwrite_check_truncate()
   1456  return folio_size(folio) >> inode->i_blkbits;   in i_blocks_per_folio()
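i_blocks_per_folio() at line 1456 is a pure shift: the folio's byte size divided by the filesystem block size recorded in inode->i_blkbits. For example, a 16 KiB folio over 4 KiB blocks (i_blkbits == 12) gives 16384 >> 12 = 4 blocks. Restated as a sketch with an invented name:

    #include <linux/pagemap.h>

    /* Number of filesystem blocks spanned by the folio (same arithmetic as
     * i_blocks_per_folio() in pagemap.h). */
    static unsigned int blocks_in_folio(struct inode *inode, struct folio *folio)
    {
            return folio_size(folio) >> inode->i_blkbits;
    }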
/linux-6.1.9/fs/ceph/ |
addr.c
    146  if (offset != 0 || length != folio_size(folio)) {   in ceph_invalidate_folio()
    588  folio_invalidate(folio, 0, folio_size(folio));   in writepage_nounlock()
    923  folio_size(folio));   in ceph_writepages_start()
   1679  if (len > folio_size(folio))   in ceph_uninline_data()
   1680  len = folio_size(folio);   in ceph_uninline_data()
/linux-6.1.9/fs/orangefs/ |
inode.c
    304  bv.bv_len = folio_size(folio);   in orangefs_read_folio()
    306  iov_iter_bvec(&iter, READ, &bv, 1, folio_size(folio));   in orangefs_read_folio()
    309  folio_size(folio), inode->i_size, NULL, NULL, file);   in orangefs_read_folio()
/linux-6.1.9/fs/9p/ |
vfs_addr.c
    167  size_t len = folio_size(folio);   in v9fs_vfs_write_folio_locked()
/linux-6.1.9/mm/kasan/ |
common.c
    287  kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);   in __kasan_slab_free_mempool()
/linux-6.1.9/fs/ |
buffer.c
   1494  BUG_ON(stop > folio_size(folio) || stop < length);   in block_invalidate_folio()
   1525  if (length == folio_size(folio))   in block_invalidate_folio()
   2207  to = min_t(unsigned, folio_size(folio) - from, count);   in block_is_partially_uptodate()
   2209  if (from < blocksize && to > folio_size(folio) - blocksize)   in block_is_partially_uptodate()
/linux-6.1.9/arch/arm64/mm/ |
hugetlbpage.c
    264  ncontig = num_contig_ptes(folio_size(folio), &pgsize);   in set_huge_pte_at()
/linux-6.1.9/fs/gfs2/ |
aops.c
    642  int partial_page = (offset || length < folio_size(folio));   in gfs2_invalidate_folio()
/linux-6.1.9/fs/nfs/ |
file.c
    407  if (offset != 0 || length < folio_size(folio))   in nfs_invalidate_folio()
/linux-6.1.9/fs/jfs/ |
jfs_metapage.c
    559  BUG_ON(offset || length < folio_size(folio));   in metapage_invalidate_folio()
/linux-6.1.9/fs/jbd2/ |
transaction.c
   2456  int partial_page = (offset || length < folio_size(folio));   in jbd2_journal_invalidate_folio()
   2466  BUG_ON(stop > folio_size(folio) || stop < length);   in jbd2_journal_invalidate_folio()
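gfs2, nfs, jfs and jbd2 all open their invalidation paths with the same test: the (offset, length) range only counts as a full invalidation when offset is zero and length reaches folio_size(), and only then may every attached buffer or journal head be released. A skeleton of that decision, with an invented handler name and the actual teardown elided:

    #include <linux/pagemap.h>

    /* Skeleton ->invalidate_folio: a partial invalidation must keep the
     * folio's private state, a full one may release everything. */
    static void sketch_invalidate_folio(struct folio *folio, size_t offset,
                                        size_t length)
    {
            bool partial_folio = offset || length < folio_size(folio);

            if (partial_folio)
                    return;

            /* ... full invalidation: drop buffers / private data ... */
    }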