/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

#ifdef CONFIG_SWAP
#include <linux/swapfile.h>
#endif	/* CONFIG_SWAP */

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the six
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further one bit: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT	(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK	((1UL << SWP_TYPE_SHIFT) - 1)
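
/*
 * Illustration (not authoritative; actual values depend on the arch and
 * config): on a typical 64-bit build, BITS_PER_XA_VALUE is 63 and
 * MAX_SWAPFILES_SHIFT is 5, so SWP_TYPE_SHIFT is 58 and an entry is packed
 * as (type << 58) | (offset & ((1UL << 58) - 1)).
 */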

/*
 * Definitions only for PFN swap entries (see is_pfn_swap_entry()).  To
 * store PFN, we only need SWP_PFN_BITS bits.  Each of the pfn swap entries
 * can use the extra bits to store other information besides PFN.
 */
#ifdef MAX_PHYSMEM_BITS
#define SWP_PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
#else  /* MAX_PHYSMEM_BITS */
#define SWP_PFN_BITS		min_t(int, \
				      sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
				      SWP_TYPE_SHIFT)
#endif	/* MAX_PHYSMEM_BITS */
#define SWP_PFN_MASK		(BIT(SWP_PFN_BITS) - 1)

/**
 * Migration swap entry specific bitfield definitions.  Layout:
 *
 *   |----------+--------------------|
 *   | swp_type | swp_offset         |
 *   |----------+--------+-+-+-------|
 *   |          | resv   |D|A|  PFN  |
 *   |----------+--------+-+-+-------|
 *
 * @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set (bit A)
 * @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set (bit D)
 *
 * Note: A/D bits will be stored in migration entries iff there are enough
 * free bits in the arch-specific swp offset.  By default we'll ignore A/D
 * bits when migrating a page.  Please refer to migration_entry_supports_ad()
 * for more information.  If there are more bits besides PFN and A/D bits,
 * they should be reserved and always be zeros.
 */
#define SWP_MIG_YOUNG_BIT	(SWP_PFN_BITS)
#define SWP_MIG_DIRTY_BIT	(SWP_PFN_BITS + 1)
#define SWP_MIG_TOTAL_BITS	(SWP_PFN_BITS + 2)

#define SWP_MIG_YOUNG		BIT(SWP_MIG_YOUNG_BIT)
#define SWP_MIG_DIRTY		BIT(SWP_MIG_DIRTY_BIT)
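
/*
 * Illustrative sketch (assuming the arch swp offset is wide enough; see
 * migration_entry_supports_ad() below): the young/dirty state of a migrated
 * pte rides in the offset above the PFN bits.  E.g. after
 * entry = make_migration_entry_young(make_readable_migration_entry(pfn)),
 * is_migration_entry_young(entry) is true and swp_offset_pfn(entry) still
 * yields pfn unchanged.
 */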

static inline bool is_pfn_swap_entry(swp_entry_t entry);

/* Clear all flags but only keep swp_entry_t related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
	if (pte_swp_exclusive(pte))
		pte = pte_swp_clear_exclusive(pte);
	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	if (pte_swp_uffd_wp(pte))
		pte = pte_swp_clear_uffd_wp(pte);
	return pte;
}

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK;
}
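
/*
 * These round-trip (illustrative): for swp_entry_t e = swp_entry(type, offset),
 * swp_type(e) == type and swp_offset(e) == offset, provided offset fits within
 * SWP_OFFSET_MASK.
 */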

/*
 * This should only be called upon a pfn swap entry to get the PFN stored
 * in the swap entry.  Please refer to is_pfn_swap_entry() for the definition
 * of a pfn swap entry.
 */
static inline unsigned long swp_offset_pfn(swp_entry_t entry)
{
	VM_BUG_ON(!is_pfn_swap_entry(entry));
	return swp_offset(entry) & SWP_PFN_MASK;
}

/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
	return !pte_none(pte) && !pte_present(pte);
}

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	pte = pte_swp_clear_flags(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = xa_to_value(arg);
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	return xa_mk_value(entry.val);
}
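
/*
 * Note (informational): shmem/tmpfs stores swap entries in its xarray via
 * swp_to_radix_entry(); xa_mk_value() shifts the value up by one bit to tag
 * it, which is the extra bit accounted for in the type/offset packing
 * described at the top of this file.
 */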

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_READ, offset);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_WRITE, offset);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);
	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE_READ, offset);
}

static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE_WRITE, offset);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_DEVICE_EXCLUSIVE_READ ||
			swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE;
}

static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_EXCLUSIVE_WRITE);
}
#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_readable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}
#endif /* CONFIG_DEVICE_PRIVATE */

#ifdef CONFIG_MIGRATION
static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ);
}

static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE);
}

static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ, offset);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_WRITE, offset);
}

/*
 * Returns whether the host has a large enough swap offset field to support
 * carrying over pgtable A/D bits for page migrations.  The result is
 * arch-specific.
 */
static inline bool migration_entry_supports_ad(void)
{
#ifdef CONFIG_SWAP
	return swap_migration_ad_supported;
#else  /* CONFIG_SWAP */
	return false;
#endif	/* CONFIG_SWAP */
}

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_YOUNG);
	return entry;
}

static inline bool is_migration_entry_young(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_offset(entry) & SWP_MIG_YOUNG;
	/* Keep the old behavior of aging page after migration */
	return false;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_DIRTY);
	return entry;
}

static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_offset(entry) & SWP_MIG_DIRTY;
	/* Keep the old behavior of clean page after migration */
	return false;
}

extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				 unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
#else /* CONFIG_MIGRATION */
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
					     pte_t *pte) { }
static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return 0;
}
static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return 0;
}

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	return entry;
}

static inline bool is_migration_entry_young(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	return entry;
}

static inline bool is_migration_entry_dirty(swp_entry_t entry)
{
	return false;
}
#endif /* CONFIG_MIGRATION */

typedef unsigned long pte_marker;

#define PTE_MARKER_UFFD_WP	BIT(0)
/*
 * "Poisoned" here is meant in the very general sense of "future accesses are
 * invalid", instead of referring very specifically to hardware memory errors.
 * This marker is meant to represent any of various different causes of this.
 */
#define PTE_MARKER_POISONED	BIT(1)
#define PTE_MARKER_MASK		(BIT(2) - 1)
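
/*
 * Illustration: a pte marker entry keeps its marker bits in the swp offset,
 * so (for example) make_pte_marker(PTE_MARKER_UFFD_WP) produces a swap pte
 * whose entry has swp_type() == SWP_PTE_MARKER and whose pte_marker_get()
 * returns PTE_MARKER_UFFD_WP.
 */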

static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
	return swp_entry(SWP_PTE_MARKER, marker);
}

static inline bool is_pte_marker_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_PTE_MARKER;
}

static inline pte_marker pte_marker_get(swp_entry_t entry)
{
	return swp_offset(entry) & PTE_MARKER_MASK;
}

static inline bool is_pte_marker(pte_t pte)
{
	return is_swap_pte(pte) && is_pte_marker_entry(pte_to_swp_entry(pte));
}

static inline pte_t make_pte_marker(pte_marker marker)
{
	return swp_entry_to_pte(make_pte_marker_entry(marker));
}

static inline swp_entry_t make_poisoned_swp_entry(void)
{
	return make_pte_marker_entry(PTE_MARKER_POISONED);
}

static inline int is_poisoned_swp_entry(swp_entry_t entry)
{
	return is_pte_marker_entry(entry) &&
	       (pte_marker_get(entry) & PTE_MARKER_POISONED);
}

/*
 * This is a special version of pte_none() that also covers the case when
 * the pte is a pte marker.  It exists because in many cases a pte marker
 * should be treated as a none pte; we have merely stored some information
 * in the none pte, so it is no longer none.
 *
 * It should be used when the pte is file-backed, RAM-based and backing
 * userspace pages, like shmem.  It is not needed for page tables that do not
 * support pte markers at all, e.g. anonymous memory, kernel-only memory
 * (including during boot), or non-RAM-based generic filesystems.  It's fine
 * to use it there too, but the extra pte marker check will be pure overhead.
 */
static inline int pte_none_mostly(pte_t pte)
{
	return pte_none(pte) || is_pte_marker(pte);
}
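
/*
 * Typical use (illustrative): code scanning shmem-backed page tables prefers
 * pte_none_mostly() over pte_none() so that pte markers (e.g. a uffd-wp
 * marker) are still treated as holes rather than as present entries.
 */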

static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset_pfn(entry));

	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(is_migration_entry(entry) && !PageLocked(p));

	return p;
}

/*
 * A pfn swap entry is a special type of swap entry that always has a pfn stored
 * in the swap offset.  They are used to represent unaddressable device memory
 * and to restrict access to a page undergoing migration.
 */
static inline bool is_pfn_swap_entry(swp_entry_t entry)
{
	/* Make sure the swp offset can always store the needed fields */
	BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);

	return is_migration_entry(entry) || is_device_private_entry(entry) ||
	       is_device_exclusive_entry(entry);
}

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	swp_entry_t arch_entry;

	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	if (pmd_swp_uffd_wp(pmd))
		pmd = pmd_swp_clear_uffd_wp(pmd);
	arch_entry = __pmd_to_swp_entry(pmd);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
#else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}

static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
{
	return swp_entry(0, 0);
}

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

static inline int is_pmd_migration_entry(pmd_t pmd)
{
	return 0;
}
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

#ifdef CONFIG_MEMORY_FAILURE

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}
#endif

static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}

#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */