Lines matching refs:fbatch in mm/truncate.c
61 struct folio_batch *fbatch, pgoff_t *indices) in truncate_folio_batch_exceptionals() argument
70 for (j = 0; j < folio_batch_count(fbatch); j++) in truncate_folio_batch_exceptionals()
71 if (xa_is_value(fbatch->folios[j])) in truncate_folio_batch_exceptionals()
74 if (j == folio_batch_count(fbatch)) in truncate_folio_batch_exceptionals()
83 for (i = j; i < folio_batch_count(fbatch); i++) { in truncate_folio_batch_exceptionals()
84 struct folio *folio = fbatch->folios[i]; in truncate_folio_batch_exceptionals()
88 fbatch->folios[j++] = folio; in truncate_folio_batch_exceptionals()
106 fbatch->nr = j; in truncate_folio_batch_exceptionals()
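
Taken together, lines 70-106 are an in-place compaction: scan forward to the first exceptional (value) entry, return early if there is none, then copy the remaining real folios down over the value entries and shrink fbatch->nr. Below is a minimal user-space sketch of that idiom; struct batch, is_value_entry() and drop_value_entries() are simplified stand-ins for the kernel's folio_batch, xa_is_value() and truncate_folio_batch_exceptionals(), not the real API, and the shadow/DAX-entry handling is elided.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BATCH_SIZE 15

/* Stand-in for struct folio_batch: slots hold either real folio
 * pointers or tagged "value" (exceptional) entries. */
struct batch {
	unsigned int nr;
	void *slots[BATCH_SIZE];
};

/* Stand-in for xa_is_value(): the low bit tags a non-folio entry. */
static bool is_value_entry(void *p)
{
	return (uintptr_t)p & 1;
}

/* Mirrors the shape of truncate_folio_batch_exceptionals() above. */
static void drop_value_entries(struct batch *b)
{
	unsigned int i, j;

	/* Fast path (cf. lines 70-74): find the first value entry. */
	for (j = 0; j < b->nr; j++)
		if (is_value_entry(b->slots[j]))
			break;
	if (j == b->nr)
		return;			/* no exceptional entries at all */

	/* Compaction (cf. lines 83-106): slide real entries down over
	 * the value entries, then shrink the batch. */
	for (i = j; i < b->nr; i++) {
		void *p = b->slots[i];

		if (!is_value_entry(p))
			b->slots[j++] = p;
		/* the kernel would clear the shadow/DAX entry here */
	}
	b->nr = j;
}

int main(void)
{
	struct batch b = { .nr = 4, .slots = {
		(void *)0x1000, (void *)0x2001,		/* 0x2001 is tagged */
		(void *)0x3000, (void *)0x4003 } };	/* 0x4003 is tagged */

	drop_value_entries(&b);
	printf("kept %u real entries\n", b.nr);		/* kept 2 */
	return 0;
}
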
335 struct folio_batch fbatch; in truncate_inode_pages_range() local
362 folio_batch_init(&fbatch); in truncate_inode_pages_range()
365 &fbatch, indices)) { in truncate_inode_pages_range()
366 index = indices[folio_batch_count(&fbatch) - 1] + 1; in truncate_inode_pages_range()
367 truncate_folio_batch_exceptionals(mapping, &fbatch, indices); in truncate_inode_pages_range()
368 for (i = 0; i < folio_batch_count(&fbatch); i++) in truncate_inode_pages_range()
369 truncate_cleanup_folio(fbatch.folios[i]); in truncate_inode_pages_range()
370 delete_from_page_cache_batch(mapping, &fbatch); in truncate_inode_pages_range()
371 for (i = 0; i < folio_batch_count(&fbatch); i++) in truncate_inode_pages_range()
372 folio_unlock(fbatch.folios[i]); in truncate_inode_pages_range()
373 folio_batch_release(&fbatch); in truncate_inode_pages_range()
404 if (!find_get_entries(mapping, index, end - 1, &fbatch, in truncate_inode_pages_range()
414 for (i = 0; i < folio_batch_count(&fbatch); i++) { in truncate_inode_pages_range()
415 struct folio *folio = fbatch.folios[i]; in truncate_inode_pages_range()
430 truncate_folio_batch_exceptionals(mapping, &fbatch, indices); in truncate_inode_pages_range()
431 folio_batch_release(&fbatch); in truncate_inode_pages_range()
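
Both loops in truncate_inode_pages_range() follow the same batch lifecycle: folio_batch_init() once up front, a gather call (find_lock_entries() at line 365, find_get_entries() at line 404) that refills the batch, per-entry processing, and folio_batch_release() to drop the references before the next refill. Line 366 advances index past the page-cache index of the batch's last entry so the next gather resumes where this one ended. Here is a schematic user-space sketch of that loop shape; gather() and release() are hypothetical stand-ins, not the kernel functions.

#include <stdbool.h>
#include <stdio.h>

#define BATCH_SIZE 15

struct batch {
	unsigned int nr;
	unsigned long indices[BATCH_SIZE];	/* cf. the indices[] array */
};

/* Hypothetical gather: refill the batch from start up to end inclusive. */
static bool gather(struct batch *b, unsigned long start, unsigned long end)
{
	b->nr = 0;
	for (unsigned long i = start; i <= end && b->nr < BATCH_SIZE; i++)
		b->indices[b->nr++] = i;
	return b->nr != 0;
}

static void release(struct batch *b)
{
	b->nr = 0;		/* kernel: drop the per-folio references */
}

int main(void)
{
	struct batch b = { 0 };
	unsigned long index = 0, end = 40;

	/* Same shape as the loop at lines 362-373: refill, advance the
	 * resume point, process each entry, release the batch. */
	while (gather(&b, index, end)) {
		index = b.indices[b.nr - 1] + 1;	/* cf. line 366 */
		for (unsigned int i = 0; i < b.nr; i++)
			printf("process index %lu\n", b.indices[i]);
		release(&b);
	}
	return 0;
}
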
506 struct folio_batch fbatch; in invalidate_mapping_pagevec() local
512 folio_batch_init(&fbatch); in invalidate_mapping_pagevec()
513 while (find_lock_entries(mapping, index, end, &fbatch, indices)) { in invalidate_mapping_pagevec()
514 for (i = 0; i < folio_batch_count(&fbatch); i++) { in invalidate_mapping_pagevec()
515 struct folio *folio = fbatch.folios[i]; in invalidate_mapping_pagevec()
542 folio_batch_remove_exceptionals(&fbatch); in invalidate_mapping_pagevec()
543 folio_batch_release(&fbatch); in invalidate_mapping_pagevec()
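
invalidate_mapping_pagevec() gathers with find_lock_entries(), which (as I read the kernel API) returns real folios already locked, skipping any it cannot lock without blocking. The per-folio body from line 515 onward therefore skips value entries, invalidates each locked folio and unlocks it in place, and the epilogue at lines 542-543 strips the value entries, which hold no reference, before folio_batch_release() drops the folio references. A compact sketch of that body-plus-epilogue ordering, restating the stand-in types from the first sketch so it compiles on its own; remove_value_entries() plays the role of folio_batch_remove_exceptionals().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BATCH_SIZE 15

struct batch {
	unsigned int nr;
	void *slots[BATCH_SIZE];
};

static bool is_value_entry(void *p)
{
	return (uintptr_t)p & 1;
}

/* Plays the role of folio_batch_remove_exceptionals(): an unconditional
 * in-place filter, with no per-entry processing. */
static void remove_value_entries(struct batch *b)
{
	unsigned int i, j = 0;

	for (i = 0; i < b->nr; i++)
		if (!is_value_entry(b->slots[i]))
			b->slots[j++] = b->slots[i];
	b->nr = j;
}

int main(void)
{
	struct batch b = { .nr = 3, .slots = {
		(void *)0x1000, (void *)0x2001, (void *)0x3000 } };

	/* Per-entry body (cf. lines 514-515): value entries are skipped;
	 * real entries arrive "locked" and are handled and unlocked here. */
	for (unsigned int i = 0; i < b.nr; i++) {
		if (is_value_entry(b.slots[i]))
			continue;
		printf("invalidate and unlock %p\n", b.slots[i]);
	}

	/* Epilogue (cf. lines 542-543): strip value entries first, only
	 * then drop the references the gather call took on real folios. */
	remove_value_entries(&b);
	printf("%u folios passed to release\n", b.nr);	/* 2 */
	return 0;
}
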
632 struct folio_batch fbatch; in invalidate_inode_pages2_range() local
642 folio_batch_init(&fbatch); in invalidate_inode_pages2_range()
644 while (find_get_entries(mapping, index, end, &fbatch, indices)) { in invalidate_inode_pages2_range()
645 for (i = 0; i < folio_batch_count(&fbatch); i++) { in invalidate_inode_pages2_range()
646 struct folio *folio = fbatch.folios[i]; in invalidate_inode_pages2_range()
689 folio_batch_remove_exceptionals(&fbatch); in invalidate_inode_pages2_range()
690 folio_batch_release(&fbatch); in invalidate_inode_pages2_range()
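
invalidate_inode_pages2_range() gathers with find_get_entries() instead, which (again, as I understand the API) returns entries with only a reference held, not locked; the loop body starting at line 646 therefore has to lock each folio itself before invalidating it, then unlock it, and the function ends with the same remove-exceptionals-then-release epilogue as above. A small sketch of that per-item lock/process/unlock discipline, using a pthread mutex as a hypothetical stand-in for the folio lock.

#include <pthread.h>
#include <stdio.h>

struct item {
	pthread_mutex_t lock;
	int id;
};

/* Per-entry discipline in invalidate_inode_pages2_range(): the gather
 * call only took a reference, so lock each item ourselves. */
static void process_batch(struct item **items, unsigned int nr)
{
	for (unsigned int i = 0; i < nr; i++) {
		struct item *it = items[i];

		pthread_mutex_lock(&it->lock);		/* cf. folio_lock() */
		printf("invalidate item %d\n", it->id);
		pthread_mutex_unlock(&it->lock);	/* cf. folio_unlock() */
	}
}

int main(void)
{
	struct item a = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct item b = { PTHREAD_MUTEX_INITIALIZER, 2 };
	struct item *batch[] = { &a, &b };

	process_batch(batch, 2);
	return 0;
}
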