/linux-6.1.9/mm/

swap.c
     52: struct folio_batch fbatch;  [member]
    232: static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
    238: for (i = 0; i < folio_batch_count(fbatch); i++) {
    239: struct folio *folio = fbatch->folios[i];
    253: folios_put(fbatch->folios, folio_batch_count(fbatch));
    254: folio_batch_init(fbatch);
    257: static void folio_batch_add_and_move(struct folio_batch *fbatch,
    260: if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) &&
    263: folio_batch_move_lru(fbatch, move_fn);
    287: struct folio_batch *fbatch;  [local in folio_rotate_reclaimable()]
    … (more matches not shown)
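The idiom behind these swap.c hits is worth making explicit: folio_batch_add() returns the space remaining after the insertion, so a zero result means "batch full, flush now". Below is a minimal sketch of that shape; batch_flush() is a hypothetical stand-in for the static folio_batch_move_lru() listed above, and the real folio_batch_add_and_move() additionally flushes early when the per-CPU LRU caches are disabled (lru_cache_disabled()).

#include <linux/mm.h>
#include <linux/pagevec.h>

/* Hypothetical stand-in for the static folio_batch_move_lru(). */
static void batch_flush(struct folio_batch *fbatch)
{
        /* ... process fbatch->folios[0 .. nr) here ... */
        folio_batch_init(fbatch);       /* make the batch reusable */
}

static void batch_add_or_flush(struct folio_batch *fbatch, struct folio *folio)
{
        /* flush once full, or immediately for a large folio */
        if (!folio_batch_add(fbatch, folio) || folio_test_large(folio))
                batch_flush(fbatch);
}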
truncate.c
     61: struct folio_batch *fbatch, pgoff_t *indices)  [argument of truncate_folio_batch_exceptionals()]
     70: for (j = 0; j < folio_batch_count(fbatch); j++)
     71: if (xa_is_value(fbatch->folios[j]))
     74: if (j == folio_batch_count(fbatch))
     83: for (i = j; i < folio_batch_count(fbatch); i++) {
     84: struct folio *folio = fbatch->folios[i];
     88: fbatch->folios[j++] = folio;
    106: fbatch->nr = j;
    335: struct folio_batch fbatch;  [local in truncate_inode_pages_range()]
    362: folio_batch_init(&fbatch);
    … (more matches not shown)
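These truncate.c hits are a compaction pass: shadow and DAX entries live in the page cache as xarray "value" entries, and xa_is_value() tells them apart from real folios; survivors are packed to the front of the batch and fbatch->nr is trimmed. A minimal sketch of just that step (the real truncate_folio_batch_exceptionals() also removes each value entry from the mapping, using the indices array it is passed):

#include <linux/pagevec.h>
#include <linux/xarray.h>

static void drop_value_entries(struct folio_batch *fbatch)
{
        unsigned int i, j;

        for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];

                if (xa_is_value(folio))
                        continue;       /* exceptional entry, not a folio */
                fbatch->folios[j++] = folio;
        }
        fbatch->nr = j;                 /* trim the batch to the survivors */
}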
filemap.c
    278: struct folio_batch *fbatch)  [argument of page_cache_delete_batch()]
    280: XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
    287: if (i >= folio_batch_count(fbatch))
    300: if (folio != fbatch->folios[i]) {
    302: fbatch->folios[i]->index, folio);
    319: struct folio_batch *fbatch)  [argument of delete_from_page_cache_batch()]
    323: if (!folio_batch_count(fbatch))
    328: for (i = 0; i < folio_batch_count(fbatch); i++) {
    329: struct folio *folio = fbatch->folios[i];
    334: page_cache_delete_batch(mapping, fbatch);
    … (more matches not shown)
shmem.c
    860: struct folio_batch fbatch;  [local in shmem_unlock_mapping()]
    863: folio_batch_init(&fbatch);
    868: filemap_get_folios(mapping, &index, ~0UL, &fbatch)) {
    869: check_move_unevictable_folios(&fbatch);
    870: folio_batch_release(&fbatch);
    907: struct folio_batch fbatch;  [local in shmem_undo_range()]
    921: folio_batch_init(&fbatch);
    924: &fbatch, indices)) {
    925: for (i = 0; i < folio_batch_count(&fbatch); i++) {
    926: folio = fbatch.folios[i];
    … (more matches not shown)
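Both shmem functions are instances of the canonical gather/process/release loop. A minimal sketch, assuming the 6.1 filemap_get_folios() semantics: it fills the batch with up to PAGEVEC_SIZE folios whose indices fall in [*start, end] (end inclusive), takes a reference on each, advances *start past the last folio found, and returns the number gathered, 0 once the range is exhausted.

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

static void walk_mapping(struct address_space *mapping)
{
        struct folio_batch fbatch;
        pgoff_t index = 0;
        unsigned int i;

        folio_batch_init(&fbatch);
        while (filemap_get_folios(mapping, &index, ULONG_MAX, &fbatch)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];

                        (void)folio;    /* a reference is held; act on it here */
                }
                folio_batch_release(&fbatch);   /* drop those references */
                cond_resched();                 /* stay preemptible on long ranges */
        }
}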
internal.h
    110: pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
    112: pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
vmscan.c
   7705: struct folio_batch fbatch;  [local in check_move_unevictable_pages()]
   7708: folio_batch_init(&fbatch);
   7714: folio_batch_add(&fbatch, page_folio(page));
   7716: check_move_unevictable_folios(&fbatch);
   7729: void check_move_unevictable_folios(struct folio_batch *fbatch)
   7736: for (i = 0; i < fbatch->nr; i++) {
   7737: struct folio *folio = fbatch->folios[i];
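check_move_unevictable_pages() is the bridge between the legacy page-based interface and the folio batch: each page is resolved to its folio with page_folio() before being queued. A sketch of that shape, where process_batch() is a hypothetical stand-in for check_move_unevictable_folios(); unlike the real caller (whose input is already capped at one batch), this version drains mid-loop so an arbitrarily long page array cannot overflow the batch.

#include <linux/mm.h>
#include <linux/pagevec.h>

static void process_batch(struct folio_batch *fbatch)
{
        /* hypothetical: e.g. check_move_unevictable_folios(fbatch) */
}

static void process_pages(struct page **pages, unsigned int nr)
{
        struct folio_batch fbatch;
        unsigned int i;

        folio_batch_init(&fbatch);
        for (i = 0; i < nr; i++) {
                /* page_folio() resolves a page to its containing folio */
                if (!folio_batch_add(&fbatch, page_folio(pages[i]))) {
                        process_batch(&fbatch);
                        folio_batch_init(&fbatch);
                }
        }
        if (folio_batch_count(&fbatch))
                process_batch(&fbatch);         /* flush the partial tail */
}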
/linux-6.1.9/include/linux/

pagevec.h
    100: static inline void folio_batch_init(struct folio_batch *fbatch)
    102: fbatch->nr = 0;
    103: fbatch->percpu_pvec_drained = false;
    106: static inline unsigned int folio_batch_count(struct folio_batch *fbatch)
    108: return fbatch->nr;
    111: static inline unsigned int fbatch_space(struct folio_batch *fbatch)
    113: return PAGEVEC_SIZE - fbatch->nr;
    126: static inline unsigned folio_batch_add(struct folio_batch *fbatch,
    129: fbatch->folios[fbatch->nr++] = folio;
    130: return fbatch_space(fbatch);
    … (more matches not shown)
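These helpers are thin accessors over the batch structure itself; folio_batch_add() stores the folio and returns fbatch_space(), i.e. 0 once the batch is full, which is what callers key their flushes on. For orientation, a sketch of the definition they manipulate, reconstructed from the accessors above and my recollection of include/linux/pagevec.h in this tree (field order and the PAGEVEC_SIZE value of 15 are assumptions to verify against the header):

#include <linux/types.h>

struct folio;                                   /* from linux/mm_types.h */

#define PAGEVEC_SIZE    15                      /* value in linux-6.1 */

struct folio_batch {
        unsigned char nr;                       /* slots in use */
        bool percpu_pvec_drained;               /* LRU drain bookkeeping */
        struct folio *folios[PAGEVEC_SIZE];     /* the gathered folios */
};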
pagemap.h
    720: pgoff_t end, struct folio_batch *fbatch);
    722: pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
   1109: struct folio_batch *fbatch);
swap.h
    453: void check_move_unevictable_folios(struct folio_batch *fbatch);
/linux-6.1.9/fs/ramfs/

file-nommu.c
    208: struct folio_batch fbatch;  [local in ramfs_nommu_get_unmapped_area()]
    224: folio_batch_init(&fbatch);
    228: ULONG_MAX, &fbatch);
    235: ret = (unsigned long) folio_address(fbatch.folios[0]);
    236: pfn = folio_pfn(fbatch.folios[0]);
    240: if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) {
    244: nr_pages += folio_nr_pages(fbatch.folios[loop]);
    250: folio_batch_release(&fbatch);
    256: folio_batch_release(&fbatch);
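The no-MMU ramfs code needs the gathered folios to be physically contiguous, and verifies it with pfn arithmetic: each folio's first pfn must equal the previous folio's pfn plus its page count. A minimal sketch of that check, assuming a non-empty batch:

#include <linux/mm.h>
#include <linux/pagevec.h>

static bool batch_is_contiguous(struct folio_batch *fbatch)
{
        unsigned long pfn = folio_pfn(fbatch->folios[0]);
        unsigned long nr = folio_nr_pages(fbatch->folios[0]);
        unsigned int i;

        for (i = 1; i < folio_batch_count(fbatch); i++) {
                if (folio_pfn(fbatch->folios[i]) != pfn + nr)
                        return false;   /* physical gap between folios */
                nr += folio_nr_pages(fbatch->folios[i]);
        }
        return true;
}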
/linux-6.1.9/fs/nilfs2/

page.c
    297: struct folio_batch fbatch;  [local in nilfs_copy_back_pages()]
    301: folio_batch_init(&fbatch);
    303: n = filemap_get_folios(smap, &start, ~0UL, &fbatch);
    307: for (i = 0; i < folio_batch_count(&fbatch); i++) {
    308: struct folio *folio = fbatch.folios[i], *dfolio;
    347: folio_batch_release(&fbatch);
    486: struct folio_batch fbatch;  [local in nilfs_find_uncommitted_extent()]
    494: folio_batch_init(&fbatch);
    498: &fbatch);
    504: folio = fbatch.folios[i];
    … (more matches not shown)
/linux-6.1.9/fs/btrfs/tests/

extent-io-tests.c
     24: struct folio_batch fbatch;  [local in process_page_range()]
     31: folio_batch_init(&fbatch);
     35: end_index, &fbatch);
     37: struct folio *folio = fbatch.folios[i];
     47: folio_batch_release(&fbatch);
/linux-6.1.9/fs/hugetlbfs/

inode.c
    623: struct folio_batch fbatch;  [local in remove_inode_hugepages()]
    628: folio_batch_init(&fbatch);
    630: while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
    631: for (i = 0; i < folio_batch_count(&fbatch); ++i) {
    632: struct folio *folio = fbatch.folios[i];
    648: folio_batch_release(&fbatch);
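Two details show up in these hugetlbfs hits: filemap_get_folios() treats its end index as inclusive, so a caller holding an exclusive bound passes end - 1, and each folio is locked for the duration of its processing before the whole batch's references are dropped at once. A sketch of that per-folio locking shape (the real remove_inode_hugepages() also serializes against faults via the hugetlb fault mutex):

#include <linux/pagemap.h>
#include <linux/pagevec.h>

static void process_batch_locked(struct folio_batch *fbatch)
{
        unsigned int i;

        for (i = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];

                folio_lock(folio);
                /* ... inspect or remove the folio while it is locked ... */
                folio_unlock(folio);
        }
        folio_batch_release(fbatch);    /* drop the gathered references */
}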
/linux-6.1.9/fs/btrfs/

compression.c
    223: struct folio_batch fbatch;  [local in end_compressed_writeback()]
    231: folio_batch_init(&fbatch);
    234: &fbatch);
    240: struct folio *folio = fbatch.folios[i];
    247: folio_batch_release(&fbatch);
extent_io.c
    274: struct folio_batch fbatch;  [local in __process_pages_contig()]
    286: folio_batch_init(&fbatch);
    291: end_index, &fbatch);
    305: struct folio *folio = fbatch.folios[i];
    311: folio_batch_release(&fbatch);
    316: folio_batch_release(&fbatch);
/linux-6.1.9/fs/

buffer.c
   1592: struct folio_batch fbatch;  [local in clean_bdev_aliases()]
   1600: folio_batch_init(&fbatch);
   1601: while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
   1602: count = folio_batch_count(&fbatch);
   1604: struct folio *folio = fbatch.folios[i];
   1633: folio_batch_release(&fbatch);
/linux-6.1.9/fs/f2fs/

compress.c
   1905: struct folio_batch fbatch;  [local in f2fs_invalidate_compress_pages()]
   1912: folio_batch_init(&fbatch);
   1917: nr = filemap_get_folios(mapping, &index, end - 1, &fbatch);
   1922: struct folio *folio = fbatch.folios[i];
   1938: folio_batch_release(&fbatch);
/linux-6.1.9/fs/ext4/

inode.c
   1575: struct folio_batch fbatch;  [local in mpage_release_unused_pages()]
   1600: folio_batch_init(&fbatch);
   1602: nr = filemap_get_folios(mapping, &index, end, &fbatch);
   1606: struct folio *folio = fbatch.folios[i];
   1623: folio_batch_release(&fbatch);
   2340: struct folio_batch fbatch;  [local in mpage_map_and_submit_buffers()]
   2355: folio_batch_init(&fbatch);
   2357: nr = filemap_get_folios(inode->i_mapping, &start, end, &fbatch);
   2361: struct page *page = &fbatch.folios[i]->page;
   2377: folio_batch_release(&fbatch);
   … (more matches not shown)
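The second ext4 function illustrates interop with code that has not yet been converted to folios: a folio embeds its head struct page as its first member, so &folio->page yields a struct page * that legacy helpers can consume. A minimal sketch:

#include <linux/pagemap.h>
#include <linux/pagevec.h>

static void process_as_pages(struct folio_batch *fbatch)
{
        unsigned int i;

        for (i = 0; i < folio_batch_count(fbatch); i++) {
                /* the head struct page is the folio's first member */
                struct page *page = &fbatch->folios[i]->page;

                (void)page;     /* ... legacy page-based processing ... */
        }
        folio_batch_release(fbatch);
}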