
Searched refs:folio (Results 1 – 25 of 335) sorted by relevance


/linux-6.6.21/mm/
swap.c
81 static void __page_cache_release(struct folio *folio) in __page_cache_release()
83 if (folio_test_lru(folio)) { in __page_cache_release()
87 lruvec = folio_lruvec_lock_irqsave(folio, &flags); in __page_cache_release()
88 lruvec_del_folio(lruvec, folio); in __page_cache_release()
89 __folio_clear_lru_flags(folio); in __page_cache_release()
93 if (unlikely(folio_test_mlocked(folio))) { in __page_cache_release()
94 long nr_pages = folio_nr_pages(folio); in __page_cache_release()
96 __folio_clear_mlocked(folio); in __page_cache_release()
97 zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); in __page_cache_release()
102 static void __folio_put_small(struct folio *folio) in __folio_put_small()
[all …]
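
The release path above is the teardown half of folio reference counting. As a minimal sketch of the matching get/put pattern (my_inspect_folio is hypothetical, assuming a kernel build context; the final folio_put() is what eventually funnels LRU folios into __page_cache_release()):

#include <linux/mm.h>
#include <linux/printk.h>

static void my_inspect_folio(struct folio *folio)
{
        folio_get(folio);       /* elevate refcount: folio cannot be freed */
        if (folio_test_lru(folio))
                pr_info("folio at index %lu is on an LRU list\n", folio->index);
        folio_put(folio);       /* drop ref; the last put frees the folio */
}
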
filemap.c
127 struct folio *folio, void *shadow) in page_cache_delete()
129 XA_STATE(xas, &mapping->i_pages, folio->index); in page_cache_delete()
135 if (!folio_test_hugetlb(folio)) { in page_cache_delete()
136 xas_set_order(&xas, folio->index, folio_order(folio)); in page_cache_delete()
137 nr = folio_nr_pages(folio); in page_cache_delete()
140 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in page_cache_delete()
145 folio->mapping = NULL; in page_cache_delete()
151 struct folio *folio) in filemap_unaccount_folio()
155 VM_BUG_ON_FOLIO(folio_mapped(folio), folio); in filemap_unaccount_folio()
156 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) { in filemap_unaccount_folio()
[all …]
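
page_cache_delete() removes a folio from the mapping's xarray; the lookup side of the same cache is filemap_get_folio(). A minimal hedged sketch (my_peek is hypothetical; note that since v6.3 filemap_get_folio() returns an ERR_PTR rather than NULL on a miss):

#include <linux/pagemap.h>
#include <linux/printk.h>

static void my_peek(struct address_space *mapping, pgoff_t index)
{
        struct folio *folio = filemap_get_folio(mapping, index);

        if (IS_ERR(folio))      /* -ENOENT: nothing cached at this index */
                return;
        pr_info("found a %ld-page folio at index %lu\n",
                folio_nr_pages(folio), folio->index);
        folio_put(folio);       /* filemap_get_folio() took a reference */
}
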
truncate.c
83 struct folio *folio = fbatch->folios[i]; in truncate_folio_batch_exceptionals()
86 if (!xa_is_value(folio)) { in truncate_folio_batch_exceptionals()
87 fbatch->folios[j++] = folio; in truncate_folio_batch_exceptionals()
96 __clear_shadow_entry(mapping, index, folio); in truncate_folio_batch_exceptionals()
153 void folio_invalidate(struct folio *folio, size_t offset, size_t length) in folio_invalidate()
155 const struct address_space_operations *aops = folio->mapping->a_ops; in folio_invalidate()
158 aops->invalidate_folio(folio, offset, length); in folio_invalidate()
172 static void truncate_cleanup_folio(struct folio *folio) in truncate_cleanup_folio()
174 if (folio_mapped(folio)) in truncate_cleanup_folio()
175 unmap_mapping_folio(folio); in truncate_cleanup_folio()
[all …]
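
folio_invalidate() dispatches through the owning mapping's a_ops. A sketch of the filesystem side of that contract (the myfs_* names are hypothetical):

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Called back from folio_invalidate() above when [offset, offset+length)
 * of the folio is being truncated or punched out. */
static void myfs_invalidate_folio(struct folio *folio, size_t offset,
                                  size_t length)
{
        /* if the whole folio is going away, drop our private state */
        if (offset == 0 && length == folio_size(folio))
                folio_detach_private(folio);
}

static const struct address_space_operations myfs_aops = {
        .invalidate_folio = myfs_invalidate_folio,
};
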
rmap.c
494 struct anon_vma *folio_get_anon_vma(struct folio *folio) in folio_get_anon_vma()
500 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); in folio_get_anon_vma()
503 if (!folio_mapped(folio)) in folio_get_anon_vma()
519 if (!folio_mapped(folio)) { in folio_get_anon_vma()
538 struct anon_vma *folio_lock_anon_vma_read(struct folio *folio, in folio_lock_anon_vma_read()
546 anon_mapping = (unsigned long)READ_ONCE(folio->mapping); in folio_lock_anon_vma_read()
549 if (!folio_mapped(folio)) in folio_lock_anon_vma_read()
560 if (!folio_mapped(folio)) { in folio_lock_anon_vma_read()
579 if (!folio_mapped(folio)) { in folio_lock_anon_vma_read()
747 struct folio *folio = page_folio(page); in page_address_in_vma()
[all …]
page_idle.c
34 static struct folio *page_idle_get_folio(unsigned long pfn) in page_idle_get_folio()
37 struct folio *folio; in page_idle_get_folio()
42 folio = page_folio(page); in page_idle_get_folio()
43 if (!folio_test_lru(folio) || !folio_try_get(folio)) in page_idle_get_folio()
45 if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) { in page_idle_get_folio()
46 folio_put(folio); in page_idle_get_folio()
47 folio = NULL; in page_idle_get_folio()
49 return folio; in page_idle_get_folio()
52 static bool page_idle_clear_pte_refs_one(struct folio *folio, in page_idle_clear_pte_refs_one()
56 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0); in page_idle_clear_pte_refs_one()
[all …]
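
The body of page_idle_get_folio() shows the standard speculative pfn-to-folio pattern: take a reference with folio_try_get(), then re-check, because the folio can be split or freed in the window. A re-statement of that pattern (my_get_folio_from_pfn is hypothetical):

#include <linux/memory_hotplug.h>
#include <linux/mm.h>

static struct folio *my_get_folio_from_pfn(unsigned long pfn)
{
        struct page *page = pfn_to_online_page(pfn);
        struct folio *folio;

        if (!page)
                return NULL;
        folio = page_folio(page);
        if (!folio_test_lru(folio) || !folio_try_get(folio))
                return NULL;
        /* re-check: the folio may have been split or reused meanwhile */
        if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
                folio_put(folio);
                return NULL;
        }
        return folio;
}
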
mlock.c
61 static struct lruvec *__mlock_folio(struct folio *folio, struct lruvec *lruvec) in __mlock_folio()
64 if (!folio_test_clear_lru(folio)) in __mlock_folio()
67 lruvec = folio_lruvec_relock_irq(folio, lruvec); in __mlock_folio()
69 if (unlikely(folio_evictable(folio))) { in __mlock_folio()
75 if (folio_test_unevictable(folio)) { in __mlock_folio()
76 lruvec_del_folio(lruvec, folio); in __mlock_folio()
77 folio_clear_unevictable(folio); in __mlock_folio()
78 lruvec_add_folio(lruvec, folio); in __mlock_folio()
81 folio_nr_pages(folio)); in __mlock_folio()
86 if (folio_test_unevictable(folio)) { in __mlock_folio()
[all …]
swap_state.c
86 int add_to_swap_cache(struct folio *folio, swp_entry_t entry, in add_to_swap_cache()
91 XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio)); in add_to_swap_cache()
92 unsigned long i, nr = folio_nr_pages(folio); in add_to_swap_cache()
97 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in add_to_swap_cache()
98 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio); in add_to_swap_cache()
99 VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio); in add_to_swap_cache()
101 folio_ref_add(folio, nr); in add_to_swap_cache()
102 folio_set_swapcache(folio); in add_to_swap_cache()
103 folio->swap = entry; in add_to_swap_cache()
111 VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio); in add_to_swap_cache()
[all …]
migrate.c
62 struct folio *folio = folio_get_nontail_page(page); in isolate_movable_page()
74 if (!folio) in isolate_movable_page()
77 if (unlikely(folio_test_slab(folio))) in isolate_movable_page()
86 if (unlikely(!__folio_test_movable(folio))) in isolate_movable_page()
90 if (unlikely(folio_test_slab(folio))) in isolate_movable_page()
104 if (unlikely(!folio_trylock(folio))) in isolate_movable_page()
107 if (!folio_test_movable(folio) || folio_test_isolated(folio)) in isolate_movable_page()
110 mops = folio_movable_ops(folio); in isolate_movable_page()
111 VM_BUG_ON_FOLIO(!mops, folio); in isolate_movable_page()
113 if (!mops->isolate_page(&folio->page, mode)) in isolate_movable_page()
[all …]
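
isolate_movable_page() only succeeds for non-LRU pages whose owner registered movable_operations. A bare-bones sketch of the driver side (the my_* names are hypothetical; a real driver also calls __SetPageMovable() on its pages and must actually transfer its private state in the migrate_page hook):

#include <linux/migrate.h>

static bool my_isolate(struct page *page, isolate_mode_t mode)
{
        return true;    /* we can always unpin this page for migration */
}

static int my_migrate(struct page *dst, struct page *src,
                      enum migrate_mode mode)
{
        /* move private state from src to dst here */
        return MIGRATEPAGE_SUCCESS;
}

static void my_putback(struct page *page)
{
        /* isolation was aborted; re-add the page to our structures */
}

static const struct movable_operations my_mops = {
        .isolate_page   = my_isolate,
        .migrate_page   = my_migrate,
        .putback_page   = my_putback,
};
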
/linux-6.6.21/include/linux/
pagemap.h
386 struct address_space *folio_mapping(struct folio *);
387 struct address_space *swapcache_mapping(struct folio *);
401 static inline struct address_space *folio_file_mapping(struct folio *folio) in folio_file_mapping()
403 if (unlikely(folio_test_swapcache(folio))) in folio_file_mapping()
404 return swapcache_mapping(folio); in folio_file_mapping()
406 return folio->mapping; in folio_file_mapping()
421 static inline struct address_space *folio_flush_mapping(struct folio *folio) in folio_flush_mapping()
423 if (unlikely(folio_test_swapcache(folio))) in folio_flush_mapping()
426 return folio_mapping(folio); in folio_flush_mapping()
443 static inline struct inode *folio_inode(struct folio *folio) in folio_inode()
[all …]
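
These helpers differ only in how they treat swap-cache folios; folio_mapping() is the general-purpose one. A short usage sketch (my_folio_owner is hypothetical):

#include <linux/pagemap.h>

/* Resolve a pagecache folio back to its host inode; returns NULL for
 * anonymous or otherwise unmapped folios, as folio_mapping() does. */
static struct inode *my_folio_owner(struct folio *folio)
{
        struct address_space *mapping = folio_mapping(folio);

        return mapping ? mapping->host : NULL;
}
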
hugetlb_cgroup.h
68 __hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd) in __hugetlb_cgroup_from_folio()
70 VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio); in __hugetlb_cgroup_from_folio()
71 if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER) in __hugetlb_cgroup_from_folio()
74 return folio->_hugetlb_cgroup_rsvd; in __hugetlb_cgroup_from_folio()
76 return folio->_hugetlb_cgroup; in __hugetlb_cgroup_from_folio()
79 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio) in hugetlb_cgroup_from_folio()
81 return __hugetlb_cgroup_from_folio(folio, false); in hugetlb_cgroup_from_folio()
85 hugetlb_cgroup_from_folio_rsvd(struct folio *folio) in hugetlb_cgroup_from_folio_rsvd()
87 return __hugetlb_cgroup_from_folio(folio, true); in hugetlb_cgroup_from_folio_rsvd()
90 static inline void __set_hugetlb_cgroup(struct folio *folio, in __set_hugetlb_cgroup()
[all …]
mm_inline.h
27 static inline int folio_is_file_lru(struct folio *folio) in folio_is_file_lru()
29 return !folio_test_swapbacked(folio); in folio_is_file_lru()
65 static __always_inline void __folio_clear_lru_flags(struct folio *folio) in __folio_clear_lru_flags()
67 VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio); in __folio_clear_lru_flags()
69 __folio_clear_lru(folio); in __folio_clear_lru_flags()
72 if (folio_test_active(folio) && folio_test_unevictable(folio)) in __folio_clear_lru_flags()
75 __folio_clear_active(folio); in __folio_clear_lru_flags()
76 __folio_clear_unevictable(folio); in __folio_clear_lru_flags()
86 static __always_inline enum lru_list folio_lru_list(struct folio *folio) in folio_lru_list()
90 VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio); in folio_lru_list()
[all …]
page_ref.h
87 static inline int folio_ref_count(const struct folio *folio) in folio_ref_count()
89 return page_ref_count(&folio->page); in folio_ref_count()
104 static inline void folio_set_count(struct folio *folio, int v) in folio_set_count()
106 set_page_count(&folio->page, v); in folio_set_count()
125 static inline void folio_ref_add(struct folio *folio, int nr) in folio_ref_add()
127 page_ref_add(&folio->page, nr); in folio_ref_add()
137 static inline void folio_ref_sub(struct folio *folio, int nr) in folio_ref_sub()
139 page_ref_sub(&folio->page, nr); in folio_ref_sub()
151 static inline int folio_ref_sub_return(struct folio *folio, int nr) in folio_ref_sub_return()
153 return page_ref_sub_return(&folio->page, nr); in folio_ref_sub_return()
[all …]
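
Each helper simply forwards to the page_ref_* primitives on &folio->page, so a folio's refcount and its head page's refcount are the same counter. A tiny sketch (my_ref_demo is hypothetical and assumes the caller already holds one reference):

#include <linux/mm.h>
#include <linux/page_ref.h>

static void my_ref_demo(struct folio *folio)
{
        folio_ref_add(folio, 1);
        /* at least two now: the caller's reference plus ours */
        WARN_ON(folio_ref_count(folio) < 2);
        folio_ref_sub(folio, 1);
}
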
page_idle.h
16 static inline bool folio_test_young(struct folio *folio) in folio_test_young()
18 struct page_ext *page_ext = page_ext_get(&folio->page); in folio_test_young()
30 static inline void folio_set_young(struct folio *folio) in folio_set_young()
32 struct page_ext *page_ext = page_ext_get(&folio->page); in folio_set_young()
41 static inline bool folio_test_clear_young(struct folio *folio) in folio_test_clear_young()
43 struct page_ext *page_ext = page_ext_get(&folio->page); in folio_test_clear_young()
55 static inline bool folio_test_idle(struct folio *folio) in folio_test_idle()
57 struct page_ext *page_ext = page_ext_get(&folio->page); in folio_test_idle()
69 static inline void folio_set_idle(struct folio *folio) in folio_set_idle()
71 struct page_ext *page_ext = page_ext_get(&folio->page); in folio_set_idle()
[all …]
migrate.h
10 typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
11 typedef void free_folio_t(struct folio *folio, unsigned long private);
66 int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
67 struct folio *src, enum migrate_mode mode, int extra_count);
68 int migrate_folio(struct address_space *mapping, struct folio *dst,
69 struct folio *src, enum migrate_mode mode);
73 struct folio *alloc_migration_target(struct folio *src, unsigned long private);
77 struct folio *dst, struct folio *src);
80 void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
81 void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
[all …]
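
For filesystems whose folios carry no private state, the generic migrate_folio() declared above can be plugged straight into the address-space ops (shmem does exactly this); myfs_aops is hypothetical:

#include <linux/fs.h>
#include <linux/migrate.h>

static const struct address_space_operations myfs_aops = {
        .migrate_folio = migrate_folio, /* generic copy-based migration */
};
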
memcontrol.h
359 static inline bool folio_memcg_kmem(struct folio *folio);
383 static inline struct mem_cgroup *__folio_memcg(struct folio *folio) in __folio_memcg()
385 unsigned long memcg_data = folio->memcg_data; in __folio_memcg()
387 VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); in __folio_memcg()
388 VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio); in __folio_memcg()
389 VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio); in __folio_memcg()
404 static inline struct obj_cgroup *__folio_objcg(struct folio *folio) in __folio_objcg()
406 unsigned long memcg_data = folio->memcg_data; in __folio_objcg()
408 VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); in __folio_objcg()
409 VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio); in __folio_objcg()
[all …]
page-flags.h
270 const struct page *: (const struct folio *)_compound_head(p), \
271 struct page *: (struct folio *)_compound_head(p)))
282 #define folio_page(folio, n) nth_page(&(folio)->page, n)
309 static unsigned long *folio_flags(struct folio *folio, unsigned n) in folio_flags()
311 struct page *page = &folio->page; in folio_flags()
374 static __always_inline bool folio_test_##lname(struct folio *folio) \
375 { return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
381 void folio_set_##lname(struct folio *folio) \
382 { set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
388 void folio_clear_##lname(struct folio *folio) \
[all …]
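
Hand-expanding the test/set templates above for one flag makes the pattern concrete. This is an illustration of roughly what the macros generate for PG_dirty (hand-expanded, not literal kernel source):

#include <linux/page-flags.h>

static __always_inline bool folio_test_dirty(struct folio *folio)
{
        return test_bit(PG_dirty, folio_flags(folio, FOLIO_PF_HEAD));
}

static __always_inline void folio_set_dirty(struct folio *folio)
{
        set_bit(PG_dirty, folio_flags(folio, FOLIO_PF_HEAD));
}
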
swap.h
338 struct folio *folio = page_folio(page); in page_swap_entry()
339 swp_entry_t entry = folio->swap; in page_swap_entry()
341 entry.val += folio_page_idx(folio, page); in page_swap_entry()
348 void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
349 void workingset_refault(struct folio *folio, void *shadow);
350 void workingset_activation(struct folio *folio);
372 void lru_note_cost_refault(struct folio *);
373 void folio_add_lru(struct folio *);
374 void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
376 void folio_mark_accessed(struct folio *);
[all …]
/linux-6.6.21/fs/afs/
write.c
30 bool afs_dirty_folio(struct address_space *mapping, struct folio *folio) in afs_dirty_folio()
32 return fscache_dirty_folio(mapping, folio, in afs_dirty_folio()
35 static void afs_folio_start_fscache(bool caching, struct folio *folio) in afs_folio_start_fscache()
38 folio_start_fscache(folio); in afs_folio_start_fscache()
41 static void afs_folio_start_fscache(bool caching, struct folio *folio) in afs_folio_start_fscache()
51 struct folio *folio) in afs_flush_conflicting_write()
56 .range_start = folio_pos(folio), in afs_flush_conflicting_write()
61 return afs_writepages_region(mapping, &wbc, folio_pos(folio), LLONG_MAX, in afs_flush_conflicting_write()
73 struct folio *folio; in afs_write_begin()
87 ret = netfs_write_begin(&vnode->netfs, file, mapping, pos, len, &folio, fsdata); in afs_write_begin()
[all …]
/linux-6.6.21/fs/iomap/
buffered-io.c
46 static inline bool ifs_is_fully_uptodate(struct folio *folio, in ifs_is_fully_uptodate()
49 struct inode *inode = folio->mapping->host; in ifs_is_fully_uptodate()
51 return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio)); in ifs_is_fully_uptodate()
60 static void ifs_set_range_uptodate(struct folio *folio, in ifs_set_range_uptodate()
63 struct inode *inode = folio->mapping->host; in ifs_set_range_uptodate()
71 if (ifs_is_fully_uptodate(folio, ifs)) in ifs_set_range_uptodate()
72 folio_mark_uptodate(folio); in ifs_set_range_uptodate()
76 static void iomap_set_range_uptodate(struct folio *folio, size_t off, in iomap_set_range_uptodate()
79 struct iomap_folio_state *ifs = folio->private; in iomap_set_range_uptodate()
82 ifs_set_range_uptodate(folio, ifs, off, len); in iomap_set_range_uptodate()
[all …]
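
The ifs state here is a per-folio bitmap with one uptodate bit per filesystem block; only when the bitmap fills does the folio itself get marked uptodate. A standalone sketch of just that bookkeeping (my_mark_blocks is hypothetical; the kernel version also takes the ifs spinlock):

#include <linux/bitmap.h>

/* returns true when every block in the folio is now uptodate */
static bool my_mark_blocks(unsigned long *state, unsigned int first_blk,
                           unsigned int nr_blks, unsigned int blks_per_folio)
{
        bitmap_set(state, first_blk, nr_blks);
        return bitmap_full(state, blks_per_folio);
}
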
/linux-6.6.21/mm/damon/
paddr.c
19 static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma, in __damon_pa_mkold()
22 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0); in __damon_pa_mkold()
36 struct folio *folio = damon_get_folio(PHYS_PFN(paddr)); in damon_pa_mkold()
43 if (!folio) in damon_pa_mkold()
46 if (!folio_mapped(folio) || !folio_raw_mapping(folio)) { in damon_pa_mkold()
47 folio_set_idle(folio); in damon_pa_mkold()
51 need_lock = !folio_test_anon(folio) || folio_test_ksm(folio); in damon_pa_mkold()
52 if (need_lock && !folio_trylock(folio)) in damon_pa_mkold()
55 rmap_walk(folio, &rwc); in damon_pa_mkold()
58 folio_unlock(folio); in damon_pa_mkold()
[all …]
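
damon_pa_mkold() drives rmap_walk(), which invokes a callback for every VMA mapping the folio. A skeletal re-statement of that usage (the my_* names are hypothetical; as the excerpt shows, non-anon and KSM folios must be locked around the walk):

#include <linux/rmap.h>

static bool my_rmap_one(struct folio *folio, struct vm_area_struct *vma,
                        unsigned long addr, void *arg)
{
        /* inspect or modify the PTE(s) mapping this folio at addr */
        return true;    /* true = continue walking the remaining VMAs */
}

static void my_walk_mappings(struct folio *folio)
{
        struct rmap_walk_control rwc = {
                .rmap_one = my_rmap_one,
        };

        rmap_walk(folio, &rwc);
}
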
ops-common.c
22 struct folio *damon_get_folio(unsigned long pfn) in damon_get_folio()
25 struct folio *folio; in damon_get_folio()
30 folio = page_folio(page); in damon_get_folio()
31 if (!folio_test_lru(folio) || !folio_try_get(folio)) in damon_get_folio()
33 if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) { in damon_get_folio()
34 folio_put(folio); in damon_get_folio()
35 folio = NULL; in damon_get_folio()
37 return folio; in damon_get_folio()
42 struct folio *folio = damon_get_folio(pte_pfn(ptep_get(pte))); in damon_ptep_mkold()
44 if (!folio) in damon_ptep_mkold()
[all …]
/linux-6.6.21/include/trace/events/
pagemap.h
19 #define trace_pagemap_flags(folio) ( \
20 (folio_test_anon(folio) ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) | \
21 (folio_mapped(folio) ? PAGEMAP_MAPPED : 0) | \
22 (folio_test_swapcache(folio) ? PAGEMAP_SWAPCACHE : 0) | \
23 (folio_test_swapbacked(folio) ? PAGEMAP_SWAPBACKED : 0) | \
24 (folio_test_mappedtodisk(folio) ? PAGEMAP_MAPPEDDISK : 0) | \
25 (folio_test_private(folio) ? PAGEMAP_BUFFERS : 0) \
30 TP_PROTO(struct folio *folio),
32 TP_ARGS(folio),
35 __field(struct folio *, folio )
[all …]
/linux-6.6.21/fs/9p/
vfs_addr.c
115 static bool v9fs_release_folio(struct folio *folio, gfp_t gfp) in v9fs_release_folio()
117 if (folio_test_private(folio)) in v9fs_release_folio()
120 if (folio_test_fscache(folio)) { in v9fs_release_folio()
123 folio_wait_fscache(folio); in v9fs_release_folio()
125 fscache_note_page_release(v9fs_inode_cookie(V9FS_I(folio_inode(folio)))); in v9fs_release_folio()
130 static void v9fs_invalidate_folio(struct folio *folio, size_t offset, in v9fs_invalidate_folio()
133 folio_wait_fscache(folio); in v9fs_invalidate_folio()
152 static int v9fs_vfs_write_folio_locked(struct folio *folio) in v9fs_vfs_write_folio_locked()
154 struct inode *inode = folio_inode(folio); in v9fs_vfs_write_folio_locked()
155 loff_t start = folio_pos(folio); in v9fs_vfs_write_folio_locked()
[all …]
/linux-6.6.21/fs/netfs/
buffered_read.c
19 struct folio *folio; in netfs_rreq_unlock_folios()
47 xas_for_each(&xas, folio, last_page) { in netfs_rreq_unlock_folios()
52 if (xas_retry(&xas, folio)) in netfs_rreq_unlock_folios()
55 pg_end = folio_pos(folio) + folio_size(folio) - 1; in netfs_rreq_unlock_folios()
66 folio_start_fscache(folio); in netfs_rreq_unlock_folios()
88 flush_dcache_folio(folio); in netfs_rreq_unlock_folios()
89 folio_mark_uptodate(folio); in netfs_rreq_unlock_folios()
93 if (folio_index(folio) == rreq->no_unlock_folio && in netfs_rreq_unlock_folios()
97 folio_unlock(folio); in netfs_rreq_unlock_folios()
224 int netfs_read_folio(struct file *file, struct folio *folio) in netfs_read_folio()
[all …]
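
netfs_read_folio() is designed to be wired directly into a network filesystem's address-space ops as the ->read_folio hook (afs uses it this way); myfs_aops is hypothetical:

#include <linux/fs.h>
#include <linux/netfs.h>

static const struct address_space_operations myfs_aops = {
        .read_folio = netfs_read_folio,
};
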
/linux-6.6.21/fs/
mpage.c
53 folio_set_error(fi.folio); in mpage_read_end_io()
55 folio_mark_uptodate(fi.folio); in mpage_read_end_io()
56 folio_unlock(fi.folio); in mpage_read_end_io()
69 folio_set_error(fi.folio); in mpage_write_end_io()
70 mapping_set_error(fi.folio->mapping, err); in mpage_write_end_io()
72 folio_end_writeback(fi.folio); in mpage_write_end_io()
104 static void map_buffer_to_folio(struct folio *folio, struct buffer_head *bh, in map_buffer_to_folio()
107 struct inode *inode = folio->mapping->host; in map_buffer_to_folio()
111 head = folio_buffers(folio); in map_buffer_to_folio()
119 folio_mark_uptodate(folio); in map_buffer_to_folio()
[all …]
