Home
last modified time | relevance | path

Searched refs:folios (Results 1 – 8 of 8) sorted by relevance

/linux-5.19.10/include/linux/
pagevec.h:97 struct folio *folios[PAGEVEC_SIZE]; member
103 offsetof(struct folio_batch, folios));
140 fbatch->folios[fbatch->nr++] = folio; in folio_batch_add()
/linux-5.19.10/mm/
truncate.c:71 if (xa_is_value(fbatch->folios[j])) in truncate_folio_batch_exceptionals()
84 struct folio *folio = fbatch->folios[i]; in truncate_folio_batch_exceptionals()
88 fbatch->folios[j++] = folio; in truncate_folio_batch_exceptionals()
369 truncate_cleanup_folio(fbatch.folios[i]); in truncate_inode_pages_range()
372 folio_unlock(fbatch.folios[i]); in truncate_inode_pages_range()
415 struct folio *folio = fbatch.folios[i]; in truncate_inode_pages_range()
515 struct folio *folio = fbatch.folios[i]; in invalidate_mapping_pagevec()
646 struct folio *folio = fbatch.folios[i]; in invalidate_inode_pages2_range()
filemap.c:280 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index); in page_cache_delete_batch()
300 if (folio != fbatch->folios[i]) { in page_cache_delete_batch()
302 fbatch->folios[i]->index, folio); in page_cache_delete_batch()
329 struct folio *folio = fbatch->folios[i]; in delete_from_page_cache_batch()
341 filemap_free_folio(mapping, fbatch->folios[i]); in delete_from_page_cache_batch()
2608 folio = fbatch->folios[folio_batch_count(fbatch) - 1]; in filemap_get_pages()
2716 fbatch.folios[0])) in filemap_read()
2717 folio_mark_accessed(fbatch.folios[0]); in filemap_read()
2720 struct folio *folio = fbatch.folios[i]; in filemap_read()
2752 folio_put(fbatch.folios[i]); in filemap_read()
swap.c:1082 struct folio *folio = fbatch->folios[i]; in folio_batch_remove_exceptionals()
1084 fbatch->folios[j++] = folio; in folio_batch_remove_exceptionals()
shmem.c:938 folio = fbatch.folios[i]; in shmem_undo_range()
1000 folio = fbatch.folios[i]; in shmem_undo_range()
1211 struct folio *folio = fbatch->folios[i]; in shmem_unuse_swap_entries()
/linux-5.19.10/Documentation/filesystems/
netfs_library.rst:104 * Handle folios that span multiple pages.
109 don't match folio sizes or folio alignments and that may cross folios.
363 it transferred. The filesystem also should not deal with setting folios
367 Note that the helpers have the folios locked, but not pinned. It is
391 [Optional] This is called after the folios in the request have all been
438 * Once the data is read, the folios that have been fully read/cleared:
446 * Any folios that need writing to the cache will then have DIO writes issued.
450 * Writes to the cache will proceed asynchronously and the folios will have the
vfs.rst:623 on dirty pages, and ->release_folio on clean folios with the private
876 release_folio is called on folios with private data to tell the
886 some or all folios in an address_space. This can happen
891 and needs to be certain that all folios are invalidated, then
939 some filesystems have more complex state (unstable folios in NFS
locking.rst:300 ->readahead() unlocks the folios that I/O is attempted on like ->read_folio().