#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
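
/*
 * Illustrative sketch only (the helper name is hypothetical, not part of this
 * header): errors recorded by mapping_set_error() above are typically reported
 * back to userspace at fsync() time by test-and-clearing the bits, roughly
 * like this.
 */
static inline int mapping_check_errors_sketch(struct address_space *mapping)
{
	int ret = 0;

	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}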

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
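
/*
 * Illustrative sketch only (hypothetical helper): a filesystem that must not
 * re-enter itself under memory pressure usually clears __GFP_FS from the
 * mapping's allocation mask at inode set-up time, before the mapping sees any
 * page allocations, along these lines.
 */
static inline void mapping_disable_fs_reclaim_sketch(struct address_space *mapping)
{
	mapping_set_gfp_mask(mapping, mapping_gfp_mask(mapping) & ~__GFP_FS);
}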

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}
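
/*
 * Illustrative sketch only (real lookups live in mm/filemap.c and use the
 * radix-tree slot/deref helpers): how a lookup-side caller such as
 * find_get_page() applies steps 1-3 of the protocol described above.  The
 * function name is hypothetical.
 */
static inline struct page *pagecache_lookup_sketch(struct address_space *mapping,
						   pgoff_t index)
{
	struct page *page;

repeat:
	rcu_read_lock();
	/* 1. find page in radix tree */
	page = radix_tree_lookup(&mapping->page_tree, index);
	if (page) {
		/* 2. conditionally increment refcount */
		if (!page_cache_get_speculative(page)) {
			rcu_read_unlock();
			goto repeat;	/* page was being freed; look again */
		}
		/* 3. check the page is still in pagecache (if not, goto 1) */
		if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
						       index))) {
			page_cache_release(page);
			rcu_read_unlock();
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}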

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
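
/*
 * Illustrative sketch only: the remove side (steps A-C in the lockless
 * pagecache comment above), roughly as reclaim uses it with
 * mapping->tree_lock held for write.  The function name is hypothetical and
 * the actual removal and freeing are left to the caller.
 */
static inline int pagecache_try_remove_sketch(struct page *page, int expected)
{
	/* A. atomically verify the refcount and freeze it at zero */
	if (!page_freeze_refs(page, expected))
		return 0;	/* someone else holds a reference: bail out */

	/* B. the caller now removes the page from the radix tree... */
	/* C. ...and frees it; to back out it would page_unfreeze_refs() */
	return 1;
}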

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page * read_cache_page_async(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
						struct address_space *mapping,
						     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
					     pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
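
/*
 * Illustrative sketch only (hypothetical helper): combining the two helpers
 * above, the byte offset in the backing file of the page covering a faulting
 * user address is just the linear index shifted back up into bytes.
 */
static inline loff_t linear_byte_offset_sketch(struct vm_area_struct *vma,
					       unsigned long address)
{
	return (loff_t)linear_page_index(vma, address) << PAGE_CACHE_SHIFT;
}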

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}
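
/*
 * Illustrative sketch only (hypothetical helper): generic write paths fault
 * the user buffer in *before* taking page locks, so that handling a fault on
 * the source buffer cannot deadlock against a pagecache page already held
 * locked.  Only up to one pagecache page's worth needs pre-faulting per
 * iteration, matching the two-page assumption documented above.
 */
static inline int prefault_user_buffer_sketch(const char __user *buf,
					      size_t count)
{
	size_t chunk = min_t(size_t, count, PAGE_CACHE_SIZE);

	if (fault_in_pages_readable(buf, chunk))
		return -EFAULT;
	return 0;
}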

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}
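
/*
 * Illustrative sketch only (hypothetical helper): how a newly allocated page
 * typically reaches the pagecache.  Real readers use add_to_page_cache_lru()
 * so the page is also placed on the LRU lists; the page comes back locked and
 * the caller must unlock_page() it once it is up to date.
 */
static inline struct page *page_cache_insert_sketch(struct address_space *mapping,
						    pgoff_t index)
{
	struct page *page = page_cache_alloc_cold(mapping);

	if (!page)
		return NULL;
	if (add_to_page_cache_lru(page, mapping, index,
				  mapping_gfp_mask(mapping))) {
		page_cache_release(page);
		return NULL;
	}
	return page;	/* locked; not yet uptodate */
}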

#endif /* _LINUX_PAGEMAP_H */