/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct folio_batch;

unsigned long invalidate_mapping_pages(struct address_space *mapping,
					pgoff_t start, pgoff_t end);

static inline void invalidate_remote_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode))
		invalidate_mapping_pages(inode->i_mapping, 0, -1);
}
int invalidate_inode_pages2(struct address_space *mapping);
int invalidate_inode_pages2_range(struct address_space *mapping,
		pgoff_t start, pgoff_t end);
int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);

int write_inode_now(struct inode *, int sync);
int filemap_fdatawrite(struct address_space *);
int filemap_flush(struct address_space *);
int filemap_fdatawait_keep_errors(struct address_space *mapping);
int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
		loff_t start_byte, loff_t end_byte);

static inline int filemap_fdatawait(struct address_space *mapping)
{
	return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
}

bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
int filemap_write_and_wait_range(struct address_space *mapping,
		loff_t lstart, loff_t lend);
int __filemap_fdatawrite_range(struct address_space *mapping,
		loff_t start, loff_t end, int sync_mode);
int filemap_fdatawrite_range(struct address_space *mapping,
		loff_t start, loff_t end);
int filemap_check_errors(struct address_space *mapping);
void __filemap_set_wb_err(struct address_space *mapping, int err);
int filemap_fdatawrite_wbc(struct address_space *mapping,
			   struct writeback_control *wbc);
int kiocb_write_and_wait(struct kiocb *iocb, size_t count);

static inline int filemap_write_and_wait(struct address_space *mapping)
{
	return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
}

/**
 * filemap_set_wb_err - set a writeback error on an address_space
 * @mapping: mapping in which to set writeback error
 * @err: error to be set in mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called. We endeavor
 * to report errors on any file that was open at the time of the error. Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * filemap_set_wb_err to record the error in the mapping so that it will be
 * automatically reported whenever fsync is called on the file.
 */
static inline void filemap_set_wb_err(struct address_space *mapping, int err)
{
	/* Fastpath for common case of no error */
	if (unlikely(err))
		__filemap_set_wb_err(mapping, err);
}

/**
 * filemap_check_wb_err - has an error occurred since the mark was sampled?
 * @mapping: mapping to check for writeback errors
 * @since: previously-sampled errseq_t
 *
 * Grab the errseq_t value from the mapping, and see if it has changed "since"
 * the given value was sampled.
 *
 * If it has then report the latest error set, otherwise return 0.
 */
static inline int filemap_check_wb_err(struct address_space *mapping,
					errseq_t since)
{
	return errseq_check(&mapping->wb_err, since);
}

/**
 * filemap_sample_wb_err - sample the current errseq_t to test for later errors
 * @mapping: mapping to be sampled
 *
 * Writeback errors are always reported relative to a particular sample point
 * in the past. This function provides those sample points.
 */
static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
{
	return errseq_sample(&mapping->wb_err);
}

/**
 * file_sample_sb_err - sample the current errseq_t to test for later errors
 * @file: file pointer to be sampled
 *
 * Grab the most current superblock-level errseq_t value for the given
 * struct file.
 */
static inline errseq_t file_sample_sb_err(struct file *file)
{
	return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
}
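
/*
 * Illustrative sketch (not part of this header): how a filesystem might
 * pair filemap_sample_wb_err() with filemap_check_wb_err() to catch
 * writeback errors raised while an fsync-like operation runs. The
 * "myfs_sync_file" helper below is hypothetical.
 *
 *	static int myfs_sync_file(struct file *file)
 *	{
 *		struct address_space *mapping = file->f_mapping;
 *		errseq_t since = filemap_sample_wb_err(mapping);
 *		int err;
 *
 *		err = filemap_write_and_wait(mapping);
 *		if (err)
 *			return err;
 *		return filemap_check_wb_err(mapping, since);
 *	}
 */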

/*
 * Flush file data before changing attributes. Caller must hold any locks
 * required to prevent further writes to this file until we're done setting
 * flags.
 */
static inline int inode_drain_writes(struct inode *inode)
{
	inode_dio_wait(inode);
	return filemap_write_and_wait(inode->i_mapping);
}

static inline bool mapping_empty(struct address_space *mapping)
{
	return xa_empty(&mapping->i_pages);
}

/*
 * mapping_shrinkable - test if page cache state allows inode reclaim
 * @mapping: the page cache mapping
 *
 * This checks the mapping's cache state for the purpose of inode
 * reclaim and LRU management.
 *
 * The caller is expected to hold the i_lock, but is not required to
 * hold the i_pages lock, which usually protects cache state. That's
 * because the i_lock and the list_lru lock that protect the inode and
 * its LRU state don't nest inside the irq-safe i_pages lock.
 *
 * Cache deletions are performed under the i_lock, which ensures that
 * when an inode goes empty, it will reliably get queued on the LRU.
 *
 * Cache additions do not acquire the i_lock and may race with this
 * check, in which case we'll report the inode as shrinkable when it
 * has cache pages. This is okay: the shrinker also checks the
 * refcount and the referenced bit, which will be elevated or set in
 * the process of adding new cache pages to an inode.
 */
static inline bool mapping_shrinkable(struct address_space *mapping)
{
	void *head;

	/*
	 * On highmem systems, there could be lowmem pressure from the
	 * inodes before there is highmem pressure from the page
	 * cache. Make inodes shrinkable regardless of cache state.
	 */
	if (IS_ENABLED(CONFIG_HIGHMEM))
		return true;

	/* Cache completely empty? Shrink away. */
	head = rcu_access_pointer(mapping->i_pages.xa_head);
	if (!head)
		return true;

	/*
	 * The xarray stores single offset-0 entries directly in the
	 * head pointer, which allows non-resident page cache entries
	 * to escape the shadow shrinker's list of xarray nodes. The
	 * inode shrinker needs to pick them up under memory pressure.
	 */
	if (!xa_is_node(head) && xa_is_value(head))
		return true;

	return false;
}

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
	AS_LARGE_FOLIO_SUPPORT = 6,
	AS_RELEASE_ALWAYS,	/* Call ->release_folio(), even if no private data */
	AS_STABLE_WRITES,	/* must wait for writeback before modifying
				   folio contents */
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called. We endeavor
 * to report errors on any file that was open at the time of the error. Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	__filemap_set_wb_err(mapping, error);

	/* Record it in superblock */
	if (mapping->host)
		errseq_set(&mapping->host->i_sb->s_wb_err, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
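
/*
 * Illustrative sketch (not part of this header): recording an async
 * writeback failure from an I/O completion handler, assuming one folio
 * per bio. "myfs_write_end_io" is hypothetical; the point is that any
 * writeback error is funnelled through mapping_set_error() so that
 * fsync(2) can report it later.
 *
 *	static void myfs_write_end_io(struct bio *bio)
 *	{
 *		struct folio *folio = bio_first_folio_all(bio);
 *
 *		if (bio->bi_status)
 *			mapping_set_error(folio->mapping,
 *					blk_status_to_errno(bio->bi_status));
 *		folio_end_writeback(folio);
 *		bio_put(bio);
 *	}
 */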

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline bool mapping_release_always(const struct address_space *mapping)
{
	return test_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline void mapping_set_release_always(struct address_space *mapping)
{
	set_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline void mapping_clear_release_always(struct address_space *mapping)
{
	clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
}

static inline bool mapping_stable_writes(const struct address_space *mapping)
{
	return test_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_set_stable_writes(struct address_space *mapping)
{
	set_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline void mapping_clear_stable_writes(struct address_space *mapping)
{
	clear_bit(AS_STABLE_WRITES, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
					   gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}
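
/*
 * Illustrative sketch (not part of this header): constraining an
 * allocation to what the mapping permits before adding a fresh folio to
 * the page cache. "myfs_new_folio" is hypothetical.
 *
 *	static struct folio *myfs_new_folio(struct address_space *mapping)
 *	{
 *		gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 *
 *		return filemap_alloc_folio(gfp, 0);
 *	}
 */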

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

/**
 * mapping_set_large_folios() - Indicate the file supports large folios.
 * @mapping: The file.
 *
 * The filesystem should call this function in its inode constructor to
 * indicate that the VFS can use large folios to cache the contents of
 * the file.
 *
 * Context: This should not be called while the inode is active as it
 * is non-atomic.
 */
static inline void mapping_set_large_folios(struct address_space *mapping)
{
	__set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}
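
/*
 * Illustrative sketch (not part of this header): opting a file into
 * large folios from a filesystem's inode setup path. "myfs_aops" and
 * "myfs_set_inode_ops" are hypothetical; the call must happen before
 * the mapping is in use, since the flag is set non-atomically.
 *
 *	static void myfs_set_inode_ops(struct inode *inode)
 *	{
 *		inode->i_mapping->a_ops = &myfs_aops;
 *		mapping_set_large_folios(inode->i_mapping);
 *	}
 */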

/*
 * Large folio support currently depends on THP. These dependencies are
 * being worked on but are not yet fixed.
 */
static inline bool mapping_large_folio_support(struct address_space *mapping)
{
	return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
		test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}

static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	return atomic_read(&mapping->nr_thps);
#else
	return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_large_folio_support(mapping))
		atomic_inc(&mapping->nr_thps);
#else
	WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_large_folio_support(mapping))
		atomic_dec(&mapping->nr_thps);
#else
	WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
#endif
}

struct address_space *page_mapping(struct page *);
struct address_space *folio_mapping(struct folio *);
struct address_space *swapcache_mapping(struct folio *);

/**
 * folio_file_mapping - Find the mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to. Folios in the swap cache return the mapping of the
 * swap file or swap device where the data is stored. This is different
 * from the mapping returned by folio_mapping(). The only reason to
 * use it is if, like NFS, you return 0 from ->activate_swapfile.
 *
 * Do not call this for folios which aren't in the page cache or swap cache.
 */
static inline struct address_space *folio_file_mapping(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return swapcache_mapping(folio);

	return folio->mapping;
}

/**
 * folio_flush_mapping - Find the file mapping this folio belongs to.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to. Anonymous folios return NULL, even if they're in
 * the swap cache. Other kinds of folio also return NULL.
 *
 * This is ONLY used by architecture cache flushing code. If you aren't
 * writing cache flushing code, you want either folio_mapping() or
 * folio_file_mapping().
 */
static inline struct address_space *folio_flush_mapping(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return NULL;

	return folio_mapping(folio);
}

static inline struct address_space *page_file_mapping(struct page *page)
{
	return folio_file_mapping(page_folio(page));
}

/**
 * folio_inode - Get the host inode for this folio.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the inode that this folio
 * belongs to.
 *
 * Do not call this for folios which aren't in the page cache.
 */
static inline struct inode *folio_inode(struct folio *folio)
{
	return folio->mapping->host;
}

/**
 * folio_attach_private - Attach private data to a folio.
 * @folio: Folio to attach data to.
 * @data: Data to attach to folio.
 *
 * Attaching private data to a folio increments the page's reference count.
 * The data must be detached before the folio will be freed.
 */
static inline void folio_attach_private(struct folio *folio, void *data)
{
	folio_get(folio);
	folio->private = data;
	folio_set_private(folio);
}

/**
 * folio_change_private - Change private data on a folio.
 * @folio: Folio to change the data on.
 * @data: Data to set on the folio.
 *
 * Change the private data attached to a folio and return the old
 * data. The page must previously have had data attached and the data
 * must be detached before the folio will be freed.
 *
 * Return: Data that was previously attached to the folio.
 */
static inline void *folio_change_private(struct folio *folio, void *data)
{
	void *old = folio_get_private(folio);

	folio->private = data;
	return old;
}

/**
 * folio_detach_private - Detach private data from a folio.
 * @folio: Folio to detach data from.
 *
 * Removes the data that was previously attached to the folio and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the folio.
 */
static inline void *folio_detach_private(struct folio *folio)
{
	void *data = folio_get_private(folio);

	if (!folio_test_private(folio))
		return NULL;
	folio_clear_private(folio);
	folio->private = NULL;
	folio_put(folio);

	return data;
}

static inline void attach_page_private(struct page *page, void *data)
{
	folio_attach_private(page_folio(page), data);
}

static inline void *detach_page_private(struct page *page)
{
	return folio_detach_private(page_folio(page));
}
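
/*
 * Illustrative sketch (not part of this header): keeping per-folio
 * bookkeeping via the private pointer. "struct myfs_folio_state" and
 * both helpers are hypothetical. Note that attaching takes a folio
 * reference which detaching drops.
 *
 *	static void myfs_init_folio(struct folio *folio)
 *	{
 *		struct myfs_folio_state *state;
 *
 *		state = kzalloc(sizeof(*state), GFP_KERNEL);
 *		if (state)
 *			folio_attach_private(folio, state);
 *	}
 *
 *	static void myfs_free_folio(struct folio *folio)
 *	{
 *		kfree(folio_detach_private(folio));
 *	}
 */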

/*
 * There are some parts of the kernel which assume that PMD entries
 * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
 * limit the maximum allocation order to PMD size. I'm not aware of any
 * assumptions about maximum order if THP are disabled, but 8 seems like
 * a good order (that's 1MB if you're using 4kB pages)
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define MAX_PAGECACHE_ORDER	HPAGE_PMD_ORDER
#else
#define MAX_PAGECACHE_ORDER	8
#endif

#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
#else
static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
{
	return folio_alloc(gfp, order);
}
#endif

static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return &filemap_alloc_folio(gfp, 0)->page;
}

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(struct file *, struct folio *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

/**
 * typedef fgf_t - Flags for getting folios from the page cache.
 *
 * Most users of the page cache will not need to use these flags;
 * there are convenience functions such as filemap_get_folio() and
 * filemap_lock_folio(). For users which need more control over exactly
 * what is done with the folios, these flags to __filemap_get_folio()
 * are available.
 *
 * * %FGP_ACCESSED - The folio will be marked accessed.
 * * %FGP_LOCK - The folio is returned locked.
 * * %FGP_CREAT - If no folio is present then a new folio is allocated,
 *   added to the page cache and the VM's LRU list. The folio is
 *   returned locked.
 * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
 *   folio is already in cache. If the folio was allocated, unlock it
 *   before returning so the caller can do the same dance.
 * * %FGP_WRITE - The folio will be written to by the caller.
 * * %FGP_NOFS - __GFP_FS will get cleared in gfp.
 * * %FGP_NOWAIT - Don't block on the folio lock.
 * * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
 * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
 *   implementation.
 */
typedef unsigned int __bitwise fgf_t;

#define FGP_ACCESSED		((__force fgf_t)0x00000001)
#define FGP_LOCK		((__force fgf_t)0x00000002)
#define FGP_CREAT		((__force fgf_t)0x00000004)
#define FGP_WRITE		((__force fgf_t)0x00000008)
#define FGP_NOFS		((__force fgf_t)0x00000010)
#define FGP_NOWAIT		((__force fgf_t)0x00000020)
#define FGP_FOR_MMAP		((__force fgf_t)0x00000040)
#define FGP_STABLE		((__force fgf_t)0x00000080)
#define FGF_GET_ORDER(fgf)	(((__force unsigned)fgf) >> 26)	/* top 6 bits */

#define FGP_WRITEBEGIN		(FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)

/**
 * fgf_set_order - Encode a length in the fgf_t flags.
 * @size: The suggested size of the folio to create.
 *
 * The caller of __filemap_get_folio() can use this to suggest a preferred
 * size for the folio that is created. If there is already a folio at
 * the index, it will be returned, no matter what its size. If a folio
 * is freshly created, it may be of a different size than requested
 * due to alignment constraints, memory pressure, or the presence of
 * other folios at nearby indices.
 */
static inline fgf_t fgf_set_order(size_t size)
{
	unsigned int shift = ilog2(size);

	if (shift <= PAGE_SHIFT)
		return 0;
	return (__force fgf_t)((shift - PAGE_SHIFT) << 26);
}
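
/*
 * Illustrative sketch (not part of this header): suggesting a folio size
 * while grabbing a folio for a buffered write, in the style of a
 * write_begin() implementation. "myfs_write_begin_folio" is
 * hypothetical; iomap's buffered I/O path uses a similar pattern.
 *
 *	static struct folio *myfs_write_begin_folio(
 *			struct address_space *mapping, loff_t pos, size_t len)
 *	{
 *		fgf_t fgp = FGP_WRITEBEGIN | fgf_set_order(len);
 *
 *		return __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp,
 *					   mapping_gfp_mask(mapping));
 *	}
 */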

void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
		fgf_t fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
		fgf_t fgp_flags, gfp_t gfp);

/**
 * filemap_get_folio - Find and get a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index. If a folio is
 * present, it is returned with an increased refcount.
 *
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index. Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_get_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index, 0, 0);
}

/**
 * filemap_lock_folio - Find and lock a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index. If a folio is
 * present, it is returned locked with an increased refcount.
 *
 * Context: May sleep.
 * Return: A folio or ERR_PTR(-ENOENT) if there is no folio in the cache for
 * this index. Will not return a shadow, swap or DAX entry.
 */
static inline struct folio *filemap_lock_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
}

/**
 * filemap_grab_folio - grab a folio from the page cache
 * @mapping: The address space to search
 * @index: The page index
 *
 * Looks up the page cache entry at @mapping & @index. If no folio is found,
 * a new folio is created. The folio is locked, marked as accessed, and
 * returned.
 *
 * Return: A found or created folio. ERR_PTR(-ENOMEM) if no folio is found
 * and failed to create a folio.
 */
static inline struct folio *filemap_grab_folio(struct address_space *mapping,
					pgoff_t index)
{
	return __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_mask(mapping));
}

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, fgf_t fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list. The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed. This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

#define swapcache_index(folio)	__page_file_index(&(folio)->page)

/**
 * folio_index - File index of a folio.
 * @folio: The folio.
 *
 * For a folio which is either in the page cache or the swap cache,
 * return its index within the address_space it belongs to. If you know
 * the page is definitely in the page cache, you can look at the folio's
 * index directly.
 *
 * Return: The index (offset in units of pages) of a folio in its file.
 */
static inline pgoff_t folio_index(struct folio *folio)
{
	if (unlikely(folio_test_swapcache(folio)))
		return swapcache_index(folio);
	return folio->index;
}

/**
 * folio_next_index - Get the index of the next folio.
 * @folio: The current folio.
 *
 * Return: The index of the folio which follows this folio in the file.
 */
static inline pgoff_t folio_next_index(struct folio *folio)
{
	return folio->index + folio_nr_pages(folio);
}

/**
 * folio_file_page - The page for a particular index.
 * @folio: The folio which contains this index.
 * @index: The index we want to look up.
 *
 * Sometimes after looking up a folio in the page cache, we need to
 * obtain the specific page for an index (eg a page fault).
 *
 * Return: The page containing the file data for this index.
 */
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
	/* HugeTLBfs indexes the page cache in units of hpage_size */
	if (folio_test_hugetlb(folio))
		return &folio->page;
	return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}

/**
 * folio_contains - Does this folio contain this index?
 * @folio: The folio.
 * @index: The page index within the file.
 *
 * Context: The caller should have the page locked in order to prevent
 * (eg) shmem from moving the page between the page cache and swap cache
 * and changing its index in the middle of the operation.
 * Return: true or false.
 */
static inline bool folio_contains(struct folio *folio, pgoff_t index)
{
	/* HugeTLBfs indexes the page cache in units of hpage_size */
	if (folio_test_hugetlb(folio))
		return folio->index == index;
	return index - folio_index(folio) < folio_nr_pages(folio);
}

/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
	/* HugeTLBfs wants the head page regardless */
	if (PageHuge(head))
		return head;

	return head + (index & (thp_nr_pages(head) - 1));
}

unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_contig(struct address_space *mapping,
		pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
					pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

struct folio *read_cache_folio(struct address_space *, pgoff_t index,
		filler_t *filler, struct file *file);
struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
		gfp_t flags);
struct page *read_cache_page(struct address_space *, pgoff_t index,
		filler_t *filler, struct file *file);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, struct file *file)
{
	return read_cache_page(mapping, index, NULL, file);
}

static inline struct folio *read_mapping_folio(struct address_space *mapping,
				pgoff_t index, struct file *file)
{
	return read_cache_folio(mapping, index, NULL, file);
}

/*
 * Get index of the page within radix-tree (but not for hugetlb pages).
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	struct page *head;

	if (likely(!PageTransTail(page)))
		return page->index;

	head = compound_head(page);
	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	return head->index + page - head;
}

extern pgoff_t hugetlb_basepage_index(struct page *page);

/*
 * Get the offset in PAGE_SIZE (even for hugetlb pages).
 * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHuge(page)))
		return hugetlb_basepage_index(page);
	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

/**
 * folio_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 */
static inline loff_t folio_pos(struct folio *folio)
{
	return page_offset(&folio->page);
}

/**
 * folio_file_pos - Returns the byte position of this folio in its file.
 * @folio: The folio.
 *
 * This differs from folio_pos() for folios which belong to a swap file.
 * NFS is the only filesystem today which needs to use folio_file_pos().
 */
static inline loff_t folio_file_pos(struct folio *folio)
{
	return page_file_offset(&folio->page);
}

/*
 * Get the offset in PAGE_SIZE (even for hugetlb folios).
 * (TODO: hugetlb folios should have ->index in PAGE_SIZE)
 */
static inline pgoff_t folio_pgoff(struct folio *folio)
{
	if (unlikely(folio_test_hugetlb(folio)))
		return hugetlb_basepage_index(&folio->page);
	return folio->index;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}
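
/*
 * Worked example (illustrative, with made-up numbers and 4kB pages):
 * for a VMA with vm_start = 0x7f0000200000 and vm_pgoff = 16, a fault
 * at address 0x7f0000203000 is (0x3000 >> PAGE_SHIFT) = 3 pages into
 * the VMA, so linear_page_index() returns 16 + 3 = 19: the page cache
 * index of the faulting page within the backing file.
 */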

struct wait_page_key {
	struct folio *folio;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct folio *folio;
	int bit_nr;
	wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
				  struct wait_page_key *key)
{
	if (wait_page->folio != key->folio)
		return false;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return false;

	return true;
}

void __folio_lock(struct folio *folio);
int __folio_lock_killable(struct folio *folio);
vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
void unlock_page(struct page *page);
void folio_unlock(struct folio *folio);

/**
 * folio_trylock() - Attempt to lock a folio.
 * @folio: The folio to attempt to lock.
 *
 * Sometimes it is undesirable to wait for a folio to be unlocked (eg
 * when the locks are being taken in the wrong order, or if making
 * progress through a batch of folios is more important than processing
 * them in order). Usually folio_lock() is the correct function to call.
 *
 * Context: Any context.
 * Return: Whether the lock was successfully acquired.
 */
static inline bool folio_trylock(struct folio *folio)
{
	return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
}

/*
 * Return true if the page was successfully locked
 */
static inline int trylock_page(struct page *page)
{
	return folio_trylock(page_folio(page));
}

/**
 * folio_lock() - Lock this folio.
 * @folio: The folio to lock.
 *
 * The folio lock protects against many things, probably more than it
 * should. It is primarily held while a folio is being brought uptodate,
 * either from its backing file or from swap. It is also held while a
 * folio is being truncated from its address_space, so holding the lock
 * is sufficient to keep folio->mapping stable.
 *
 * The folio lock is also held while write() is modifying the page to
 * provide POSIX atomicity guarantees (as long as the write does not
 * cross a page boundary). Other modifications to the data in the folio
 * do not hold the folio lock and can race with writes, eg DMA and stores
 * to mapped pages.
 *
 * Context: May sleep. If you need to acquire the locks of two or
 * more folios, they must be in order of ascending index, if they are
 * in the same address_space. If they are in different address_spaces,
 * acquire the lock of the folio which belongs to the address_space which
 * has the lowest address in memory first.
 */
static inline void folio_lock(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		__folio_lock(folio);
}

/**
 * lock_page() - Lock the folio containing this page.
 * @page: The page to lock.
 *
 * See folio_lock() for a description of what the lock protects.
 * This is a legacy function and new code should probably use folio_lock()
 * instead.
 *
 * Context: May sleep. Pages in the same folio share a lock, so do not
 * attempt to lock two pages which share a folio.
 */
static inline void lock_page(struct page *page)
{
	struct folio *folio;
	might_sleep();

	folio = page_folio(page);
	if (!folio_trylock(folio))
		__folio_lock(folio);
}

/**
 * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
 * @folio: The folio to lock.
 *
 * Attempts to lock the folio, like folio_lock(), except that the sleep
 * to acquire the lock is interruptible by a fatal signal.
 *
 * Context: May sleep; see folio_lock().
 * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
 */
static inline int folio_lock_killable(struct folio *folio)
{
	might_sleep();
	if (!folio_trylock(folio))
		return __folio_lock_killable(folio);
	return 0;
}
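
/*
 * Illustrative sketch (not part of this header): using the killable
 * variant in a path where a user task may wait, so a fatal signal
 * aborts the wait instead of hanging. "myfs_prepare_folio" is
 * hypothetical.
 *
 *	static int myfs_prepare_folio(struct folio *folio)
 *	{
 *		int err = folio_lock_killable(folio);
 *
 *		if (err)
 *			return err;	(-EINTR on a fatal signal)
 *		... work on the locked folio ...
 *		folio_unlock(folio);
 *		return 0;
 *	}
 */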

/*
 * folio_lock_or_retry - Lock the folio, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __folio_lock_or_retry().
 */
static inline vm_fault_t folio_lock_or_retry(struct folio *folio,
					     struct vm_fault *vmf)
{
	might_sleep();
	if (!folio_trylock(folio))
		return __folio_lock_or_retry(folio, vmf);
	return 0;
}

/*
 * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
 * and should not be used directly.
 */
void folio_wait_bit(struct folio *folio, int bit_nr);
int folio_wait_bit_killable(struct folio *folio, int bit_nr);

/*
 * Wait for a folio to be unlocked.
 *
 * This must be called with the caller "holding" the folio,
 * ie with increased folio reference count so that the folio won't
 * go away during the wait.
 */
static inline void folio_wait_locked(struct folio *folio)
{
	if (folio_test_locked(folio))
		folio_wait_bit(folio, PG_locked);
}

static inline int folio_wait_locked_killable(struct folio *folio)
{
	if (!folio_test_locked(folio))
		return 0;
	return folio_wait_bit_killable(folio, PG_locked);
}

static inline void wait_on_page_locked(struct page *page)
{
	folio_wait_locked(page_folio(page));
}

void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
void end_page_writeback(struct page *page);
void folio_end_writeback(struct folio *folio);
void wait_for_stable_page(struct page *page);
void folio_wait_stable(struct folio *folio);
void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
static inline void __set_page_dirty(struct page *page,
		struct address_space *mapping, int warn)
{
	__folio_mark_dirty(page_folio(page), mapping, warn);
}
void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
void __folio_cancel_dirty(struct folio *folio);
static inline void folio_cancel_dirty(struct folio *folio)
{
	/* Avoid atomic ops, locking, etc. when not actually needed. */
	if (folio_test_dirty(folio))
		__folio_cancel_dirty(folio);
}
bool folio_clear_dirty_for_io(struct folio *folio);
bool clear_page_dirty_for_io(struct page *page);
void folio_invalidate(struct folio *folio, size_t offset, size_t length);
int __set_page_dirty_nobuffers(struct page *page);
bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);

#ifdef CONFIG_MIGRATION
int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode);
#else
#define filemap_migrate_folio NULL
#endif
void folio_end_private_2(struct folio *folio);
void folio_wait_private_2(struct folio *folio);
int folio_wait_private_2_killable(struct folio *folio);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);

/*
 * Fault in userspace address range.
 */
size_t fault_in_writeable(char __user *uaddr, size_t size);
size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
size_t fault_in_readable(const char __user *uaddr, size_t size);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp);
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
		pgoff_t index, gfp_t gfp);
void filemap_remove_folio(struct folio *folio);
void __filemap_remove_folio(struct folio *folio, void *shadow);
void replace_page_cache_folio(struct folio *old, struct folio *new);
void delete_from_page_cache_batch(struct address_space *mapping,
		struct folio_batch *fbatch);
bool filemap_release_folio(struct folio *folio, gfp_t gfp);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
		int whence);

/* Must be non-static for BPF error injection */
int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
		pgoff_t index, gfp_t gfp, void **shadowp);

bool filemap_range_has_writeback(struct address_space *mapping,
				 loff_t start_byte, loff_t end_byte);

/**
 * filemap_range_needs_writeback - check if range potentially needs writeback
 * @mapping: address space within which to check
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback. Used by O_DIRECT
 * read/write with IOCB_NOWAIT, to see if the caller needs to do
 * filemap_write_and_wait_range() before proceeding.
 *
 * Return: %true if the caller should do filemap_write_and_wait_range() before
 * doing O_DIRECT to a page in this range, %false otherwise.
 */
static inline bool filemap_range_needs_writeback(struct address_space *mapping,
						 loff_t start_byte,
						 loff_t end_byte)
{
	if (!mapping->nrpages)
		return false;
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
	    !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
		return false;
	return filemap_range_has_writeback(mapping, start_byte, end_byte);
}
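
/*
 * Illustrative sketch (not part of this header): a direct-I/O read path
 * honouring IOCB_NOWAIT by bailing out rather than blocking on
 * writeback. "myfs_dio_read_begin" is hypothetical.
 *
 *	static int myfs_dio_read_begin(struct kiocb *iocb, size_t count)
 *	{
 *		struct address_space *mapping = iocb->ki_filp->f_mapping;
 *		loff_t end = iocb->ki_pos + count - 1;
 *
 *		if ((iocb->ki_flags & IOCB_NOWAIT) &&
 *		    filemap_range_needs_writeback(mapping, iocb->ki_pos, end))
 *			return -EAGAIN;
 *		return 0;
 *	}
 */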

/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages. Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 * @ra: File readahead state. May be NULL.
 */
struct readahead_control {
	struct file *file;
	struct address_space *mapping;
	struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
	pgoff_t _index;
	unsigned int _nr_pages;
	unsigned int _batch_count;
	bool _workingset;
	unsigned long _pflags;
};

#define DEFINE_READAHEAD(ractl, f, r, m, i)				\
	struct readahead_control ractl = {				\
		.file = f,						\
		.mapping = m,						\
		.ra = r,						\
		._index = i,						\
	}

#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)

void page_cache_ra_unbounded(struct readahead_control *,
		unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
void page_cache_async_ra(struct readahead_control *, struct folio *,
		unsigned long req_count);
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len);

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read. The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file, pgoff_t index,
		unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_sync_ra(&ractl, req_count);
}

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @folio: The folio at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file,
		struct folio *folio, pgoff_t index, unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_async_ra(&ractl, folio, req_count);
}

static inline struct folio *__readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio;

	BUG_ON(ractl->_batch_count > ractl->_nr_pages);
	ractl->_nr_pages -= ractl->_batch_count;
	ractl->_index += ractl->_batch_count;

	if (!ractl->_nr_pages) {
		ractl->_batch_count = 0;
		return NULL;
	}

	folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	ractl->_batch_count = folio_nr_pages(folio);

	return folio;
}

/**
 * readahead_page - Get the next page to read.
 * @ractl: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount. The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	return &folio->page;
}

/**
 * readahead_folio - Get the next folio to read.
 * @ractl: The current readahead request.
 *
 * Context: The folio is locked. The caller should unlock the folio once
 * all I/O to that folio has completed.
 * Return: A pointer to the next folio, or %NULL if we are done.
 */
static inline struct folio *readahead_folio(struct readahead_control *ractl)
{
	struct folio *folio = __readahead_folio(ractl);

	if (folio)
		folio_put(folio);
	return folio;
}
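
/*
 * Illustrative sketch (not part of this header): the shape of a
 * ->readahead implementation consuming the request one folio at a time.
 * "myfs_read_folio_async" is a hypothetical helper that starts I/O and
 * unlocks the folio on completion.
 *
 *	static void myfs_readahead(struct readahead_control *ractl)
 *	{
 *		struct folio *folio;
 *
 *		while ((folio = readahead_folio(ractl)))
 *			myfs_read_folio_async(ractl->file, folio);
 *	}
 */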

static inline unsigned int __readahead_batch(struct readahead_control *rac,
		struct page **array, unsigned int array_sz)
{
	unsigned int i = 0;
	XA_STATE(xas, &rac->mapping->i_pages, 0);
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;
	rac->_batch_count = 0;

	xas_set(&xas, rac->_index);
	rcu_read_lock();
	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
		if (xas_retry(&xas, page))
			continue;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageTail(page), page);
		array[i++] = page;
		rac->_batch_count += thp_nr_pages(page);
		if (i == array_sz)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount. The caller
 * should decrease the refcount once each page has been submitted for I/O
 * and unlock each page once all I/O to that page has completed.
 * Return: The number of pages placed in the array. 0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)				\
	__readahead_batch(rac, array, ARRAY_SIZE(array))

/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
	return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline size_t readahead_length(struct readahead_control *rac)
{
	return rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
	return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
	return rac->_nr_pages;
}

/**
 * readahead_batch_length - The number of bytes in the current batch.
 * @rac: The readahead request.
 */
static inline size_t readahead_batch_length(struct readahead_control *rac)
{
	return rac->_batch_count * PAGE_SIZE;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

/**
 * folio_mkwrite_check_truncate - check if folio was truncated
 * @folio: the folio to check
 * @inode: the inode to check the folio against
 *
 * Return: the number of bytes in the folio up to EOF,
 * or -EFAULT if the folio was truncated.
 */
static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	size_t offset = offset_in_folio(folio, size);

	if (!folio->mapping)
		return -EFAULT;

	/* folio is wholly inside EOF */
	if (folio_next_index(folio) - 1 < index)
		return folio_size(folio);
	/* folio is wholly past EOF */
	if (folio->index > index || !offset)
		return -EFAULT;
	/* folio is partially inside EOF */
	return offset;
}
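
/*
 * Illustrative sketch (not part of this header): a ->page_mkwrite
 * handler using the check above to detect a racing truncate.
 * "myfs_page_mkwrite" is hypothetical; the folio must be locked before
 * the check so that the result stays valid.
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct folio *folio = page_folio(vmf->page);
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *
 *		folio_lock(folio);
 *		if (folio_mkwrite_check_truncate(folio, inode) < 0) {
 *			folio_unlock(folio);
 *			return VM_FAULT_NOPAGE;
 *		}
 *		folio_mark_dirty(folio);
 *		return VM_FAULT_LOCKED;
 *	}
 */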

/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	int offset = offset_in_page(size);

	if (page->mapping != inode->i_mapping)
		return -EFAULT;

	/* page is wholly inside EOF */
	if (page->index < index)
		return PAGE_SIZE;
	/* page is wholly past EOF */
	if (page->index > index || !offset)
		return -EFAULT;
	/* page is partially inside EOF */
	return offset;
}

/**
 * i_blocks_per_folio - How many blocks fit in this folio.
 * @inode: The inode which contains the blocks.
 * @folio: The folio.
 *
 * If the block size is larger than the size of this folio, return zero.
 *
 * Context: The caller should hold a refcount on the folio to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this folio.
 */
static inline
unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
{
	return folio_size(folio) >> inode->i_blkbits;
}

static inline
unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
{
	return i_blocks_per_folio(inode, page_folio(page));
}
#endif /* _LINUX_PAGEMAP_H */