/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremapping pages that are
 *   not marked PG_reserved (as they might be in use by somebody else who does
 *   not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does in general not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can also be used by
 * private allocations for their own usage.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as backing storage.  These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they simply can be dropped (e.g. as
 * a result of MADV_FREE).
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit.  The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */
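
/*
 * Illustrative sketch (not part of this header's API): how a typical read
 * path interacts with PG_locked and PG_uptodate. folio_lock()/folio_unlock()
 * and the filler convention are assumed from <linux/pagemap.h>:
 *
 *	folio_lock(folio);			// pins the folio, blocks truncation
 *	if (!folio_test_uptodate(folio))
 *		err = filler(file, folio);	// read completion sets uptodate,
 *						// then unlocks the folio
 *	else
 *		folio_unlock(folio);
 */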

/*
 * Don't use the pageflags directly.  Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
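/*
 * A minimal sketch of how the upper "fields" area is consumed elsewhere.
 * The real accessors and the ZONES_PGSHIFT/ZONES_MASK constants live in
 * <linux/mm.h>; this is shown only to illustrate the layout above:
 *
 *	static inline enum zone_type example_zonenum(const struct page *page)
 *	{
 *		return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *	}
 */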
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_workingset,
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_64BIT
	PG_arch_2,
#endif
#ifdef CONFIG_KASAN_HW_TAGS
	PG_skip_kasan_poison,
#endif
	__NR_PAGEFLAGS,

	PG_readahead = PG_reclaim,

	/*
	 * Depending on the way an anonymous folio can be mapped into a page
	 * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped
	 * THP), PG_anon_exclusive may be set only for the head page or for
	 * tail pages of an anonymous folio. For now, we only expect it to be
	 * set on tail pages for PTE-mapped THP.
	 */
	PG_anon_exclusive = PG_mappedtodisk,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/*
	 * Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_workingset,

#ifdef CONFIG_MEMORY_FAILURE
	/*
	 * Compound pages. Stored in first tail page's flags.
	 * Indicates that at least one subpage is hwpoisoned in the
	 * THP.
	 */
	PG_has_hwpoisoned = PG_error,
#endif

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,
};

#define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)

#ifndef __GENERATING_BOUNDS_H

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
			 hugetlb_optimize_vmemmap_key);

static __always_inline bool hugetlb_optimize_vmemmap_enabled(void)
{
	return static_branch_maybe(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
				   &hugetlb_optimize_vmemmap_key);
}

/*
 * If the feature of optimizing vmemmap pages associated with each HugeTLB
 * page is enabled, the head vmemmap page frame is reused and all of the tail
 * vmemmap addresses map to the head vmemmap page frame (for details, see the
 * figure at the head of mm/hugetlb_vmemmap.c).  In other words, there is more
 * than one page struct with PG_head associated with each HugeTLB page.  We
 * __know__ that there is only one head page struct; the tail page structs
 * with PG_head are fake head page structs.  We need an approach to
 * distinguish between those two different types of page structs so that
 * compound_head() can return the real head page struct when the parameter is
 * a tail page struct that has PG_head set.
 *
 * page_fixed_fake_head() returns the real head page struct if @page is a
 * fake head page; otherwise, it returns @page, which can be either a true
 * head page or a tail page.
 */
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
	if (!hugetlb_optimize_vmemmap_enabled())
		return page;

	/*
	 * Only addresses aligned with PAGE_SIZE of struct page may be fake
	 * head struct pages. The alignment check aims to avoid accessing the
	 * fields (e.g. compound_head) of @page[1]. It can avoid touching a
	 * (possibly) cold cacheline in some cases.
	 */
	if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
	    test_bit(PG_head, &page->flags)) {
		/*
		 * We can safely access the field of @page[1] with PG_head
		 * because @page is a compound page composed of at least
		 * two contiguous pages.
		 */
		unsigned long head = READ_ONCE(page[1].compound_head);

		if (likely(head & 1))
			return (const struct page *)(head - 1);
	}
	return page;
}
#else
static inline const struct page *page_fixed_fake_head(const struct page *page)
{
	return page;
}

static inline bool hugetlb_optimize_vmemmap_enabled(void)
{
	return false;
}
#endif

static __always_inline int page_is_fake_head(struct page *page)
{
	return page_fixed_fake_head(page) != page;
}

static inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page_fixed_fake_head(page);
}

#define compound_head(page)	((typeof(page))_compound_head(page))
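
/*
 * Usage sketch: given any page of a compound allocation, compound_head()
 * yields the page whose flags are authoritative under the PF_HEAD and
 * PF_NO_TAIL policies below. For order-0 and head pages it is an identity:
 *
 *	struct page *head = compound_head(page);
 *	// head == page unless page->compound_head has bit 0 set,
 *	// in which case the encoded head pointer is followed.
 */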

/**
 * page_folio - Converts from page to folio.
 * @p: The page.
 *
 * Every page is part of a folio.  This function cannot be called on a
 * NULL pointer.
 *
 * Context: No reference, nor lock is required on @p.  If the caller
 * does not hold a reference, this call may race with a folio split, so
 * it should re-check the folio still contains this page after gaining
 * a reference on the folio.
 * Return: The folio which contains this page.
 */
#define page_folio(p)		(_Generic((p),				\
	const struct page *:	(const struct folio *)_compound_head(p), \
	struct page *:		(struct folio *)_compound_head(p)))

/**
 * folio_page - Return a page from a folio.
 * @folio: The folio.
 * @n: The page number to return.
 *
 * @n is relative to the start of the folio.  This function does not
 * check that the page number lies within @folio; the caller is presumed
 * to have a reference to the page.
 */
#define folio_page(folio, n)	nth_page(&(folio)->page, n)
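
/*
 * Round-trip sketch: page_folio() and folio_page() are index-adjusted
 * inverses. folio_page_idx() is assumed from <linux/mm.h>:
 *
 *	struct folio *folio = page_folio(p);
 *	struct page *same = folio_page(folio, folio_page_idx(folio, p));
 *	// same == p
 */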

static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page);
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) ||
	       READ_ONCE(page->compound_head) & 1;
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return READ_ONCE(page->flags) == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

static unsigned long *folio_flags(struct folio *folio, unsigned n)
{
	struct page *page = &folio->page;

	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	VM_BUG_ON_PGFLAGS(n > 0 && !test_bit(PG_head, &page->flags), page);
	return &page[n].flags;
}

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK:
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages;
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/* Which page is the flag stored in */
#define FOLIO_PF_ANY		0
#define FOLIO_PF_HEAD		0
#define FOLIO_PF_ONLY_HEAD	0
#define FOLIO_PF_NO_TAIL	0
#define FOLIO_PF_NO_COMPOUND	0
#define FOLIO_PF_SECOND		1

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline bool folio_test_##lname(struct folio *folio)	\
{ return test_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
static __always_inline int Page##uname(struct page *page)		\
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void folio_set_##lname(struct folio *folio)				\
{ set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void SetPage##uname(struct page *page)		\
{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void folio_clear_##lname(struct folio *folio)				\
{ clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void ClearPage##uname(struct page *page)		\
{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void __folio_set_##lname(struct folio *folio)				\
{ __set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }		\
static __always_inline void __SetPage##uname(struct page *page)		\
{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline							\
void __folio_clear_##lname(struct folio *folio)				\
{ __clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); }	\
static __always_inline void __ClearPage##uname(struct page *page)	\
{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline							\
bool folio_test_set_##lname(struct folio *folio)			\
{ return test_and_set_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestSetPage##uname(struct page *page)	\
{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline							\
bool folio_test_clear_##lname(struct folio *folio)			\
{ return test_and_clear_bit(PG_##lname, folio_flags(folio, FOLIO_##policy)); } \
static __always_inline int TestClearPage##uname(struct page *page)	\
{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

#define TESTPAGEFLAG_FALSE(uname, lname)				\
static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname, lname)					\
static inline void folio_set_##lname(struct folio *folio) { }		\
static inline void SetPage##uname(struct page *page) {  }

#define CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void folio_clear_##lname(struct folio *folio) { }		\
static inline void ClearPage##uname(struct page *page) {  }

#define __CLEARPAGEFLAG_NOOP(uname, lname)				\
static inline void __folio_clear_##lname(struct folio *folio) { }	\
static inline void __ClearPage##uname(struct page *page) {  }

#define TESTSETFLAG_FALSE(uname, lname)					\
static inline bool folio_test_set_##lname(struct folio *folio)		\
{ return false; }							\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname, lname)				\
static inline bool folio_test_clear_##lname(struct folio *folio)	\
{ return false; }							\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname, lname) TESTPAGEFLAG_FALSE(uname, lname)	\
	SETPAGEFLAG_NOOP(uname, lname) CLEARPAGEFLAG_NOOP(uname, lname)

#define TESTSCFLAG_FALSE(uname, lname)					\
	TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname)
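
/*
 * Expansion sketch: a single line such as
 *
 *	PAGEFLAG(Dirty, dirty, PF_HEAD)
 *
 * generates six accessors:
 *
 *	bool folio_test_dirty(struct folio *);
 *	void folio_set_dirty(struct folio *);
 *	void folio_clear_dirty(struct folio *);
 *	int PageDirty(struct page *);
 *	void SetPageDirty(struct page *);
 *	void ClearPageDirty(struct page *);
 *
 * with the PF_HEAD policy redirecting the Page* variants to the head page
 * of a compound page.
 */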

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause release_folio() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set operations exist for PG_writeback.  The unconditional
 * operators are risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem, highmem)
#endif

#ifdef CONFIG_SWAP
static __always_inline bool folio_test_swapcache(struct folio *folio)
{
	return folio_test_swapbacked(folio) &&
			test_bit(PG_swapcache, folio_flags(folio, 0));
}

static __always_inline bool PageSwapCache(struct page *page)
{
	return folio_test_swapcache(page_folio(page));
}

SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache, swapcache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked)
	TESTSCFLAG_FALSE(Mlocked, mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached, uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#define MAGIC_HWPOISON	0x48575053U	/* HWPS */
extern void SetPageHWPoisonTakenOff(struct page *page);
extern void ClearPageHWPoisonTakenOff(struct page *page);
extern bool take_page_off_buddy(struct page *page);
extern bool put_page_back_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

#ifdef CONFIG_KASAN_HW_TAGS
PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD)
#else
PAGEFLAG_FALSE(SkipKASanPoison, skip_kasan_poison)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for a non-lru movable
 * page, and then page->mapping points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to the user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

static __always_inline bool folio_mapping_flags(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline bool folio_test_anon(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline bool PageAnon(struct page *page)
{
	return folio_test_anon(page_folio(page));
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline bool folio_test_ksm(struct folio *folio)
{
	return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}

static __always_inline bool PageKsm(struct page *page)
{
	return folio_test_ksm(page_folio(page));
}
#else
TESTPAGEFLAG_FALSE(Ksm, ksm)
#endif

u64 stable_page_flags(struct page *page);

/**
 * folio_test_uptodate - Is this folio up to date?
 * @folio: The folio.
 *
 * The uptodate flag is set on a folio when every byte in the folio is
 * at least as new as the corresponding bytes on storage.  Anonymous
 * and CoW folios are always uptodate.  If the folio is not uptodate,
 * some of the bytes in it may be; see the is_partially_uptodate()
 * address_space operation.
 */
static inline bool folio_test_uptodate(struct folio *folio)
{
	bool ret = test_bit(PG_uptodate, folio_flags(folio, 0));
	/*
	 * Must ensure that the data we read out of the folio is loaded
	 * _after_ we've loaded folio->flags to check the uptodate bit.
	 * We can skip the barrier if the folio is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See folio_mark_uptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline int PageUptodate(struct page *page)
{
	return folio_test_uptodate(page_folio(page));
}

static __always_inline void __folio_mark_uptodate(struct folio *folio)
{
	smp_wmb();
	__set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void folio_mark_uptodate(struct folio *folio)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the folio
	 * uptodate are actually visible before folio_test_uptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, folio_flags(folio, 0));
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	__folio_mark_uptodate((struct folio *)page);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	folio_mark_uptodate((struct folio *)page);
}
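
/*
 * Pairing sketch: the smp_wmb() in folio_mark_uptodate() pairs with the
 * smp_rmb() in folio_test_uptodate(). A writer fills the folio and only
 * then publishes the bit; a reader observing the bit is guaranteed to see
 * the data (folio_address() assumed from <linux/mm.h>):
 *
 *	// writer (e.g. read completion)
 *	memcpy(folio_address(folio), src, len);
 *	folio_mark_uptodate(folio);		// smp_wmb() before set_bit()
 *
 *	// reader
 *	if (folio_test_uptodate(folio))		// smp_rmb() after test_bit()
 *		consume(folio_address(folio));
 */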

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

bool __folio_start_writeback(struct folio *folio, bool keep_write);
bool set_page_writeback(struct page *page);

#define folio_start_writeback(folio)			\
	__folio_start_writeback(folio, false)
#define folio_start_writeback_keepwrite(folio)	\
	__folio_start_writeback(folio, true)

static inline void set_page_writeback_keepwrite(struct page *page)
{
	folio_start_writeback_keepwrite(page_folio(page));
}

static inline bool test_set_page_writeback(struct page *page)
{
	return set_page_writeback(page);
}

static __always_inline bool folio_test_head(struct folio *folio)
{
	return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY));
}

static __always_inline int PageHead(struct page *page)
{
	PF_POISONED_CHECK(page);
	return test_bit(PG_head, &page->flags) && !page_is_fake_head(page);
}

__SETPAGEFLAG(Head, head, PF_ANY)
__CLEARPAGEFLAG(Head, head, PF_ANY)
CLEARPAGEFLAG(Head, head, PF_ANY)

/**
 * folio_test_large() - Does this folio contain more than one page?
 * @folio: The folio to test.
 *
 * Return: True if the folio is larger than one page.
 */
static inline bool folio_test_large(struct folio *folio)
{
	return folio_test_head(folio);
}

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
static inline bool folio_test_hugetlb(struct folio *folio)
{
	return PageHeadHuge(&folio->page);
}
#else
TESTPAGEFLAG_FALSE(Huge, hugetlb)
TESTPAGEFLAG_FALSE(HeadHuge, headhuge)
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

static inline bool folio_test_transhuge(struct folio *folio)
{
	return folio_test_head(folio);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can
 * postpone per small page mapcount accounting (and its overhead from atomic
 * operations) until the first PMD split.
 *
 * For such a page, PageDoubleMap means ->_mapcount in all sub-pages is
 * offset up by one. This reference will go away with the last
 * compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
PAGEFLAG(DoubleMap, double_map, PF_SECOND)
	TESTSCFLAG(DoubleMap, double_map, PF_SECOND)
#else
TESTPAGEFLAG_FALSE(TransHuge, transhuge)
TESTPAGEFLAG_FALSE(TransCompound, transcompound)
TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap)
TESTPAGEFLAG_FALSE(TransTail, transtail)
PAGEFLAG_FALSE(DoubleMap, double_map)
	TESTSCFLAG_FALSE(DoubleMap, double_map)
#endif

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * PageHasHWPoisoned indicates that at least one subpage is hwpoisoned in the
 * compound page.
 *
 * This flag is set by hwpoison handler.  Cleared by THP split or free page.
 */
PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
	TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND)
#else
PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(struct page *page)
{
	if (PageHWPoison(page))
		return true;
	return PageHuge(page) && PageHWPoison(compound_head(page));
}

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve		0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_table	0x00000200
#define PG_guard	0x00000400

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
	return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}

#define PAGE_TYPE_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)		\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}
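
/*
 * Worked example of the inverted sense, starting from the initial
 * page_type value of -1 (0xffffffff):
 *
 *	__SetPageBuddy(page);	// page_type &= ~PG_buddy  -> 0xffffff7f
 *	PageBuddy(page);	// (0xffffff7f & 0xf0000080) == 0xf0000000 -> true
 *	__ClearPageBuddy(page);	// page_type |= PG_buddy   -> 0xffffffff
 *	PageBuddy(page);	// (0xffffffff & 0xf0000080) != 0xf0000000 -> false
 */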

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online. (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining unmovable PageOffline() pages without
 * putting them back to the buddy, it can do so via the memory notifier by
 * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the
 * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline()
 * pages (now with a reference count of zero) are treated like free pages,
 * allowing the containing memory block to get offlined. A driver that
 * relies on this feature is aware that re-onlining the memory block will
 * require re-setting the pages PageOffline() and not giving them to the
 * buddy via online_page_callback_t.
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard)

extern bool is_free_buddy_page(struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);

static __always_inline int PageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void SetPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

static __always_inline void __ClearPageAnonExclusive(struct page *page)
{
	VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
	VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
	__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED)
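
/*
 * Usage sketch: the allocator's free path tests this mask roughly as
 * follows (the real check lives in mm/page_alloc.c, which reports
 * offenders via bad_page()):
 *
 *	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE))
 *		bad_page(page, "page still has flags set at free");
 */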

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(PAGEFLAGS_MASK & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

static inline bool folio_has_private(struct folio *folio)
{
	return page_has_private(&folio->page);
}

#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif	/* PAGE_FLAGS_H */