1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/mm/swapfile.c
4 *
5 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
6 * Swap reorganised 29.12.95, Stephen Tweedie
7 */
8
9 #include <linux/blkdev.h>
10 #include <linux/mm.h>
11 #include <linux/sched/mm.h>
12 #include <linux/sched/task.h>
13 #include <linux/hugetlb.h>
14 #include <linux/mman.h>
15 #include <linux/slab.h>
16 #include <linux/kernel_stat.h>
17 #include <linux/swap.h>
18 #include <linux/vmalloc.h>
19 #include <linux/pagemap.h>
20 #include <linux/namei.h>
21 #include <linux/shmem_fs.h>
22 #include <linux/blk-cgroup.h>
23 #include <linux/random.h>
24 #include <linux/writeback.h>
25 #include <linux/proc_fs.h>
26 #include <linux/seq_file.h>
27 #include <linux/init.h>
28 #include <linux/ksm.h>
29 #include <linux/rmap.h>
30 #include <linux/security.h>
31 #include <linux/backing-dev.h>
32 #include <linux/mutex.h>
33 #include <linux/capability.h>
34 #include <linux/syscalls.h>
35 #include <linux/memcontrol.h>
36 #include <linux/poll.h>
37 #include <linux/oom.h>
38 #include <linux/frontswap.h>
39 #include <linux/swapfile.h>
40 #include <linux/export.h>
41 #include <linux/swap_slots.h>
42 #include <linux/sort.h>
43 #include <linux/completion.h>
44
45 #include <asm/tlbflush.h>
46 #include <linux/swapops.h>
47 #include <linux/swap_cgroup.h>
48 #include "swap.h"
49
50 static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
51 unsigned char);
52 static void free_swap_count_continuations(struct swap_info_struct *);
53
54 static DEFINE_SPINLOCK(swap_lock);
55 static unsigned int nr_swapfiles;
56 atomic_long_t nr_swap_pages;
57 /*
58 * Some modules use swappable objects and may try to swap them out under
59 * memory pressure (via the shrinker). Before doing so, they may wish to
60 * check to see if any swap space is available.
61 */
62 EXPORT_SYMBOL_GPL(nr_swap_pages);
63 /* Protected with swap_lock; reading it in vm_swap_full() doesn't need the lock */
64 long total_swap_pages;
65 static int least_priority = -1;
66 unsigned long swapfile_maximum_size;
67 #ifdef CONFIG_MIGRATION
68 bool swap_migration_ad_supported;
69 #endif /* CONFIG_MIGRATION */
70
71 static const char Bad_file[] = "Bad swap file entry ";
72 static const char Unused_file[] = "Unused swap file entry ";
73 static const char Bad_offset[] = "Bad swap offset entry ";
74 static const char Unused_offset[] = "Unused swap offset entry ";
75
76 /*
77 * all active swap_info_structs
78 * protected with swap_lock, and ordered by priority.
79 */
80 static PLIST_HEAD(swap_active_head);
81
82 /*
83 * all available (active, not full) swap_info_structs
84 * protected with swap_avail_lock, ordered by priority.
85 * This is used by folio_alloc_swap() instead of swap_active_head
86 * because swap_active_head includes all swap_info_structs,
87 * but folio_alloc_swap() doesn't need to look at full ones.
88 * This uses its own lock instead of swap_lock because when a
89 * swap_info_struct changes between not-full/full, it needs to
90 * add/remove itself to/from this list, but the swap_info_struct->lock
91 * is held and the locking order requires swap_lock to be taken
92 * before any swap_info_struct->lock.
93 */
94 static struct plist_head *swap_avail_heads;
95 static DEFINE_SPINLOCK(swap_avail_lock);
96
97 struct swap_info_struct *swap_info[MAX_SWAPFILES];
98
99 static DEFINE_MUTEX(swapon_mutex);
100
101 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
102 /* Activity counter to indicate that a swapon or swapoff has occurred */
103 static atomic_t proc_poll_event = ATOMIC_INIT(0);
104
105 atomic_t nr_rotate_swap = ATOMIC_INIT(0);
106
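/*
 * Map a swap type number to its swap_info_struct. Returns NULL for an
 * out-of-range type; otherwise the entry is read with READ_ONCE(), the
 * read side of the pointer publication noted inline below.
 */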
107 static struct swap_info_struct *swap_type_to_swap_info(int type)
108 {
109 if (type >= MAX_SWAPFILES)
110 return NULL;
111
112 return READ_ONCE(swap_info[type]); /* rcu_dereference() */
113 }
114
115 static inline unsigned char swap_count(unsigned char ent)
116 {
117 return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */
118 }
119
120 /* Reclaim the swap entry anyway if possible */
121 #define TTRS_ANYWAY 0x1
122 /*
123 * Reclaim the swap entry if there are no more mappings of the
124 * corresponding page
125 */
126 #define TTRS_UNMAPPED 0x2
127 /* Reclaim the swap entry if swap is getting full */
128 #define TTRS_FULL 0x4
129
130 /* returns 1 if swap entry is freed */
131 static int __try_to_reclaim_swap(struct swap_info_struct *si,
132 unsigned long offset, unsigned long flags)
133 {
134 swp_entry_t entry = swp_entry(si->type, offset);
135 struct folio *folio;
136 int ret = 0;
137
138 folio = filemap_get_folio(swap_address_space(entry), offset);
139 if (!folio)
140 return 0;
141 /*
142 * When this function is called from scan_swap_map_slots(), it is being
143 * called by vmscan.c while reclaiming folios, so a folio lock may already
144 * be held here. We have to use trylock to avoid deadlock. This is a special
145 * case; in usual operations use folio_free_swap() with an explicit
146 * folio_lock().
147 */
148 if (folio_trylock(folio)) {
149 if ((flags & TTRS_ANYWAY) ||
150 ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
151 ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)))
152 ret = folio_free_swap(folio);
153 folio_unlock(folio);
154 }
155 folio_put(folio);
156 return ret;
157 }
158
159 static inline struct swap_extent *first_se(struct swap_info_struct *sis)
160 {
161 struct rb_node *rb = rb_first(&sis->swap_extent_root);
162 return rb_entry(rb, struct swap_extent, rb_node);
163 }
164
165 static inline struct swap_extent *next_se(struct swap_extent *se)
166 {
167 struct rb_node *rb = rb_next(&se->rb_node);
168 return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
169 }
170
171 /*
172 * swapon tells the device that all the old swap contents can be discarded,
173 * to allow the swap device to optimize its wear-levelling.
174 */
175 static int discard_swap(struct swap_info_struct *si)
176 {
177 struct swap_extent *se;
178 sector_t start_block;
179 sector_t nr_blocks;
180 int err = 0;
181
182 /* Do not discard the swap header page! */
183 se = first_se(si);
184 start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
185 nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
186 if (nr_blocks) {
187 err = blkdev_issue_discard(si->bdev, start_block,
188 nr_blocks, GFP_KERNEL);
189 if (err)
190 return err;
191 cond_resched();
192 }
193
194 for (se = next_se(se); se; se = next_se(se)) {
195 start_block = se->start_block << (PAGE_SHIFT - 9);
196 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
197
198 err = blkdev_issue_discard(si->bdev, start_block,
199 nr_blocks, GFP_KERNEL);
200 if (err)
201 break;
202
203 cond_resched();
204 }
205 return err; /* That will often be -EOPNOTSUPP */
206 }
207
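/*
 * Find the swap extent containing @offset by walking the extent rbtree.
 * Every valid offset lies within some extent, hence the BUG() if the
 * lookup falls through.
 */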
208 static struct swap_extent *
209 offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
210 {
211 struct swap_extent *se;
212 struct rb_node *rb;
213
214 rb = sis->swap_extent_root.rb_node;
215 while (rb) {
216 se = rb_entry(rb, struct swap_extent, rb_node);
217 if (offset < se->start_page)
218 rb = rb->rb_left;
219 else if (offset >= se->start_page + se->nr_pages)
220 rb = rb->rb_right;
221 else
222 return se;
223 }
224 /* It *must* be present */
225 BUG();
226 }
227
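/*
 * Translate a swap-cache page into the 512-byte sector where it lives on
 * the backing device, using the extent that covers its swap offset.
 */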
228 sector_t swap_page_sector(struct page *page)
229 {
230 struct swap_info_struct *sis = page_swap_info(page);
231 struct swap_extent *se;
232 sector_t sector;
233 pgoff_t offset;
234
235 offset = __page_file_index(page);
236 se = offset_to_swap_extent(sis, offset);
237 sector = se->start_block + (offset - se->start_page);
238 return sector << (PAGE_SHIFT - 9);
239 }
240
241 /*
242 * swap allocation tells the device that a cluster of swap can now be
243 * discarded, to allow the swap device to optimize its wear-levelling.
244 */
245 static void discard_swap_cluster(struct swap_info_struct *si,
246 pgoff_t start_page, pgoff_t nr_pages)
247 {
248 struct swap_extent *se = offset_to_swap_extent(si, start_page);
249
250 while (nr_pages) {
251 pgoff_t offset = start_page - se->start_page;
252 sector_t start_block = se->start_block + offset;
253 sector_t nr_blocks = se->nr_pages - offset;
254
255 if (nr_blocks > nr_pages)
256 nr_blocks = nr_pages;
257 start_page += nr_blocks;
258 nr_pages -= nr_blocks;
259
260 start_block <<= PAGE_SHIFT - 9;
261 nr_blocks <<= PAGE_SHIFT - 9;
262 if (blkdev_issue_discard(si->bdev, start_block,
263 nr_blocks, GFP_NOIO))
264 break;
265
266 se = next_se(se);
267 }
268 }
269
270 #ifdef CONFIG_THP_SWAP
271 #define SWAPFILE_CLUSTER HPAGE_PMD_NR
272
273 #define swap_entry_size(size) (size)
274 #else
275 #define SWAPFILE_CLUSTER 256
276
277 /*
278 * Define swap_entry_size() as a constant to let the compiler optimize
279 * out some code if !CONFIG_THP_SWAP
280 */
281 #define swap_entry_size(size) 1
282 #endif
283 #define LATENCY_LIMIT 256
284
285 static inline void cluster_set_flag(struct swap_cluster_info *info,
286 unsigned int flag)
287 {
288 info->flags = flag;
289 }
290
291 static inline unsigned int cluster_count(struct swap_cluster_info *info)
292 {
293 return info->data;
294 }
295
296 static inline void cluster_set_count(struct swap_cluster_info *info,
297 unsigned int c)
298 {
299 info->data = c;
300 }
301
302 static inline void cluster_set_count_flag(struct swap_cluster_info *info,
303 unsigned int c, unsigned int f)
304 {
305 info->flags = f;
306 info->data = c;
307 }
308
309 static inline unsigned int cluster_next(struct swap_cluster_info *info)
310 {
311 return info->data;
312 }
313
314 static inline void cluster_set_next(struct swap_cluster_info *info,
315 unsigned int n)
316 {
317 info->data = n;
318 }
319
320 static inline void cluster_set_next_flag(struct swap_cluster_info *info,
321 unsigned int n, unsigned int f)
322 {
323 info->flags = f;
324 info->data = n;
325 }
326
327 static inline bool cluster_is_free(struct swap_cluster_info *info)
328 {
329 return info->flags & CLUSTER_FLAG_FREE;
330 }
331
332 static inline bool cluster_is_null(struct swap_cluster_info *info)
333 {
334 return info->flags & CLUSTER_FLAG_NEXT_NULL;
335 }
336
337 static inline void cluster_set_null(struct swap_cluster_info *info)
338 {
339 info->flags = CLUSTER_FLAG_NEXT_NULL;
340 info->data = 0;
341 }
342
343 static inline bool cluster_is_huge(struct swap_cluster_info *info)
344 {
345 if (IS_ENABLED(CONFIG_THP_SWAP))
346 return info->flags & CLUSTER_FLAG_HUGE;
347 return false;
348 }
349
350 static inline void cluster_clear_huge(struct swap_cluster_info *info)
351 {
352 info->flags &= ~CLUSTER_FLAG_HUGE;
353 }
354
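/*
 * Lock the swap cluster containing @offset. Only devices with cluster_info
 * (the SSD case) have per-cluster locks; for others this returns NULL and
 * callers fall back to si->lock via lock_cluster_or_swap_info().
 */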
355 static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
356 unsigned long offset)
357 {
358 struct swap_cluster_info *ci;
359
360 ci = si->cluster_info;
361 if (ci) {
362 ci += offset / SWAPFILE_CLUSTER;
363 spin_lock(&ci->lock);
364 }
365 return ci;
366 }
367
368 static inline void unlock_cluster(struct swap_cluster_info *ci)
369 {
370 if (ci)
371 spin_unlock(&ci->lock);
372 }
373
374 /*
375 * Determine the locking method in use for this device. Return
376 * swap_cluster_info if SSD-style cluster-based locking is in place.
377 */
378 static inline struct swap_cluster_info *lock_cluster_or_swap_info(
379 struct swap_info_struct *si, unsigned long offset)
380 {
381 struct swap_cluster_info *ci;
382
383 /* Try to use fine-grained SSD-style locking if available: */
384 ci = lock_cluster(si, offset);
385 /* Otherwise, fall back to traditional, coarse locking: */
386 if (!ci)
387 spin_lock(&si->lock);
388
389 return ci;
390 }
391
392 static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
393 struct swap_cluster_info *ci)
394 {
395 if (ci)
396 unlock_cluster(ci);
397 else
398 spin_unlock(&si->lock);
399 }
400
401 static inline bool cluster_list_empty(struct swap_cluster_list *list)
402 {
403 return cluster_is_null(&list->head);
404 }
405
406 static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
407 {
408 return cluster_next(&list->head);
409 }
410
411 static void cluster_list_init(struct swap_cluster_list *list)
412 {
413 cluster_set_null(&list->head);
414 cluster_set_null(&list->tail);
415 }
416
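/*
 * Append cluster @idx to a cluster list. The list head and tail store
 * cluster indexes; the "next" link of the old tail lives in its own
 * swap_cluster_info entry, so that entry's lock is taken while updating it.
 */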
417 static void cluster_list_add_tail(struct swap_cluster_list *list,
418 struct swap_cluster_info *ci,
419 unsigned int idx)
420 {
421 if (cluster_list_empty(list)) {
422 cluster_set_next_flag(&list->head, idx, 0);
423 cluster_set_next_flag(&list->tail, idx, 0);
424 } else {
425 struct swap_cluster_info *ci_tail;
426 unsigned int tail = cluster_next(&list->tail);
427
428 /*
429 * Nested cluster lock, but both cluster locks are
430 * only acquired while we hold swap_info_struct->lock
431 */
432 ci_tail = ci + tail;
433 spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING);
434 cluster_set_next(ci_tail, idx);
435 spin_unlock(&ci_tail->lock);
436 cluster_set_next_flag(&list->tail, idx, 0);
437 }
438 }
439
440 static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
441 struct swap_cluster_info *ci)
442 {
443 unsigned int idx;
444
445 idx = cluster_next(&list->head);
446 if (cluster_next(&list->tail) == idx) {
447 cluster_set_null(&list->head);
448 cluster_set_null(&list->tail);
449 } else
450 cluster_set_next_flag(&list->head,
451 cluster_next(&ci[idx]), 0);
452
453 return idx;
454 }
455
456 /* Add a cluster to discard list and schedule it to do discard */
457 static void swap_cluster_schedule_discard(struct swap_info_struct *si,
458 unsigned int idx)
459 {
460 /*
461 * If scan_swap_map_slots() can't find a free cluster, it will check
462 * si->swap_map directly. To make sure the cluster being discarded isn't
463 * taken by scan_swap_map_slots(), mark the swap entries bad (occupied).
464 * They will be cleared after the discard.
465 */
466 memset(si->swap_map + idx * SWAPFILE_CLUSTER,
467 SWAP_MAP_BAD, SWAPFILE_CLUSTER);
468
469 cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
470
471 schedule_work(&si->discard_work);
472 }
473
474 static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
475 {
476 struct swap_cluster_info *ci = si->cluster_info;
477
478 cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE);
479 cluster_list_add_tail(&si->free_clusters, ci, idx);
480 }
481
482 /*
483 * Do the discards for real. After a cluster discard is finished, the cluster
484 * will be added to the free cluster list. The caller should hold si->lock.
485 */
486 static void swap_do_scheduled_discard(struct swap_info_struct *si)
487 {
488 struct swap_cluster_info *info, *ci;
489 unsigned int idx;
490
491 info = si->cluster_info;
492
493 while (!cluster_list_empty(&si->discard_clusters)) {
494 idx = cluster_list_del_first(&si->discard_clusters, info);
495 spin_unlock(&si->lock);
496
497 discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
498 SWAPFILE_CLUSTER);
499
500 spin_lock(&si->lock);
501 ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
502 __free_cluster(si, idx);
503 memset(si->swap_map + idx * SWAPFILE_CLUSTER,
504 0, SWAPFILE_CLUSTER);
505 unlock_cluster(ci);
506 }
507 }
508
509 static void swap_discard_work(struct work_struct *work)
510 {
511 struct swap_info_struct *si;
512
513 si = container_of(work, struct swap_info_struct, discard_work);
514
515 spin_lock(&si->lock);
516 swap_do_scheduled_discard(si);
517 spin_unlock(&si->lock);
518 }
519
520 static void swap_users_ref_free(struct percpu_ref *ref)
521 {
522 struct swap_info_struct *si;
523
524 si = container_of(ref, struct swap_info_struct, users);
525 complete(&si->comp);
526 }
527
528 static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
529 {
530 struct swap_cluster_info *ci = si->cluster_info;
531
532 VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
533 cluster_list_del_first(&si->free_clusters, ci);
534 cluster_set_count_flag(ci + idx, 0, 0);
535 }
536
537 static void free_cluster(struct swap_info_struct *si, unsigned long idx)
538 {
539 struct swap_cluster_info *ci = si->cluster_info + idx;
540
541 VM_BUG_ON(cluster_count(ci) != 0);
542 /*
543 * If the swap is discardable, schedule a discard of the cluster
544 * instead of freeing it immediately. The cluster will be freed
545 * after the discard.
546 */
547 if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
548 (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
549 swap_cluster_schedule_discard(si, idx);
550 return;
551 }
552
553 __free_cluster(si, idx);
554 }
555
556 /*
557 * The cluster corresponding to page_nr will be used. The cluster will be
558 * removed from the free cluster list and its usage counter will be increased.
559 */
560 static void inc_cluster_info_page(struct swap_info_struct *p,
561 struct swap_cluster_info *cluster_info, unsigned long page_nr)
562 {
563 unsigned long idx = page_nr / SWAPFILE_CLUSTER;
564
565 if (!cluster_info)
566 return;
567 if (cluster_is_free(&cluster_info[idx]))
568 alloc_cluster(p, idx);
569
570 VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
571 cluster_set_count(&cluster_info[idx],
572 cluster_count(&cluster_info[idx]) + 1);
573 }
574
575 /*
576 * The cluster corresponding to page_nr decreases its usage count by one. If
577 * the usage counter becomes 0, meaning no page in the cluster is in use, we
578 * can optionally discard the cluster and add it to the free cluster list.
579 */
580 static void dec_cluster_info_page(struct swap_info_struct *p,
581 struct swap_cluster_info *cluster_info, unsigned long page_nr)
582 {
583 unsigned long idx = page_nr / SWAPFILE_CLUSTER;
584
585 if (!cluster_info)
586 return;
587
588 VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
589 cluster_set_count(&cluster_info[idx],
590 cluster_count(&cluster_info[idx]) - 1);
591
592 if (cluster_count(&cluster_info[idx]) == 0)
593 free_cluster(p, idx);
594 }
595
596 /*
597 * It's possible for scan_swap_map_slots() to use a free cluster in the middle
598 * of the free cluster list. Avoid such abuse to prevent list corruption.
599 */
600 static bool
601 scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
602 unsigned long offset)
603 {
604 struct percpu_cluster *percpu_cluster;
605 bool conflict;
606
607 offset /= SWAPFILE_CLUSTER;
608 conflict = !cluster_list_empty(&si->free_clusters) &&
609 offset != cluster_list_first(&si->free_clusters) &&
610 cluster_is_free(&si->cluster_info[offset]);
611
612 if (!conflict)
613 return false;
614
615 percpu_cluster = this_cpu_ptr(si->percpu_cluster);
616 cluster_set_null(&percpu_cluster->index);
617 return true;
618 }
619
620 /*
621 * Try to get a swap entry from the current CPU's swap entry pool (a cluster).
622 * This might involve allocating a new cluster for the current CPU too.
623 */
624 static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
625 unsigned long *offset, unsigned long *scan_base)
626 {
627 struct percpu_cluster *cluster;
628 struct swap_cluster_info *ci;
629 unsigned long tmp, max;
630
631 new_cluster:
632 cluster = this_cpu_ptr(si->percpu_cluster);
633 if (cluster_is_null(&cluster->index)) {
634 if (!cluster_list_empty(&si->free_clusters)) {
635 cluster->index = si->free_clusters.head;
636 cluster->next = cluster_next(&cluster->index) *
637 SWAPFILE_CLUSTER;
638 } else if (!cluster_list_empty(&si->discard_clusters)) {
639 /*
640 * We don't have a free cluster but there are clusters being
641 * discarded; do the discard now and reclaim them, then
642 * reread cluster_next_cpu since we dropped si->lock.
643 */
644 swap_do_scheduled_discard(si);
645 *scan_base = this_cpu_read(*si->cluster_next_cpu);
646 *offset = *scan_base;
647 goto new_cluster;
648 } else
649 return false;
650 }
651
652 /*
653 * Other CPUs can use our cluster if they can't find a free cluster;
654 * check whether there is still a free entry in the cluster.
655 */
656 tmp = cluster->next;
657 max = min_t(unsigned long, si->max,
658 (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
659 if (tmp < max) {
660 ci = lock_cluster(si, tmp);
661 while (tmp < max) {
662 if (!si->swap_map[tmp])
663 break;
664 tmp++;
665 }
666 unlock_cluster(ci);
667 }
668 if (tmp >= max) {
669 cluster_set_null(&cluster->index);
670 goto new_cluster;
671 }
672 cluster->next = tmp + 1;
673 *offset = tmp;
674 *scan_base = tmp;
675 return true;
676 }
677
678 static void __del_from_avail_list(struct swap_info_struct *p)
679 {
680 int nid;
681
682 for_each_node(nid)
683 plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
684 }
685
686 static void del_from_avail_list(struct swap_info_struct *p)
687 {
688 spin_lock(&swap_avail_lock);
689 __del_from_avail_list(p);
690 spin_unlock(&swap_avail_lock);
691 }
692
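/*
 * Account @nr_entries slots starting at @offset as allocated: narrow the
 * lowest_bit/highest_bit search window, bump inuse_pages, and drop the
 * device from the avail lists once it becomes completely full.
 */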
693 static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
694 unsigned int nr_entries)
695 {
696 unsigned int end = offset + nr_entries - 1;
697
698 if (offset == si->lowest_bit)
699 si->lowest_bit += nr_entries;
700 if (end == si->highest_bit)
701 WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries);
702 WRITE_ONCE(si->inuse_pages, si->inuse_pages + nr_entries);
703 if (si->inuse_pages == si->pages) {
704 si->lowest_bit = si->max;
705 si->highest_bit = 0;
706 del_from_avail_list(si);
707 }
708 }
709
710 static void add_to_avail_list(struct swap_info_struct *p)
711 {
712 int nid;
713
714 spin_lock(&swap_avail_lock);
715 for_each_node(nid) {
716 WARN_ON(!plist_node_empty(&p->avail_lists[nid]));
717 plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);
718 }
719 spin_unlock(&swap_avail_lock);
720 }
721
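/*
 * Return @nr_entries slots starting at @offset to the free pool: widen the
 * search window, put the device back on the avail lists if it had been full,
 * invalidate arch and frontswap state and notify the block driver per slot,
 * then clear any swap-cache shadow entries for the range.
 */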
722 static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
723 unsigned int nr_entries)
724 {
725 unsigned long begin = offset;
726 unsigned long end = offset + nr_entries - 1;
727 void (*swap_slot_free_notify)(struct block_device *, unsigned long);
728
729 if (offset < si->lowest_bit)
730 si->lowest_bit = offset;
731 if (end > si->highest_bit) {
732 bool was_full = !si->highest_bit;
733
734 WRITE_ONCE(si->highest_bit, end);
735 if (was_full && (si->flags & SWP_WRITEOK))
736 add_to_avail_list(si);
737 }
738 atomic_long_add(nr_entries, &nr_swap_pages);
739 WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries);
740 if (si->flags & SWP_BLKDEV)
741 swap_slot_free_notify =
742 si->bdev->bd_disk->fops->swap_slot_free_notify;
743 else
744 swap_slot_free_notify = NULL;
745 while (offset <= end) {
746 arch_swap_invalidate_page(si->type, offset);
747 frontswap_invalidate_page(si->type, offset);
748 if (swap_slot_free_notify)
749 swap_slot_free_notify(si->bdev, offset);
750 offset++;
751 }
752 clear_shadow_from_swap_cache(si->type, begin, end);
753 }
754
755 static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
756 {
757 unsigned long prev;
758
759 if (!(si->flags & SWP_SOLIDSTATE)) {
760 si->cluster_next = next;
761 return;
762 }
763
764 prev = this_cpu_read(*si->cluster_next_cpu);
765 /*
766 * When crossing into a different swap-address-space-sized aligned
767 * chunk, choose another chunk at random to avoid lock contention
768 * on the swap address space, if possible.
769 */
770 if ((prev >> SWAP_ADDRESS_SPACE_SHIFT) !=
771 (next >> SWAP_ADDRESS_SPACE_SHIFT)) {
772 /* No free swap slots available */
773 if (si->highest_bit <= si->lowest_bit)
774 return;
775 next = si->lowest_bit +
776 prandom_u32_max(si->highest_bit - si->lowest_bit + 1);
777 next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES);
778 next = max_t(unsigned int, next, si->lowest_bit);
779 }
780 this_cpu_write(*si->cluster_next_cpu, next);
781 }
782
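/*
 * Check whether the slot at @offset looks usable: either free, or (when
 * vm_swap_full()) a cache-only entry that may be reclaimed. On success
 * si->lock is taken and true is returned so the caller can re-validate
 * the slot under the lock.
 */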
783 static bool swap_offset_available_and_locked(struct swap_info_struct *si,
784 unsigned long offset)
785 {
786 if (data_race(!si->swap_map[offset])) {
787 spin_lock(&si->lock);
788 return true;
789 }
790
791 if (vm_swap_full() && READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
792 spin_lock(&si->lock);
793 return true;
794 }
795
796 return false;
797 }
798
799 static int scan_swap_map_slots(struct swap_info_struct *si,
800 unsigned char usage, int nr,
801 swp_entry_t slots[])
802 {
803 struct swap_cluster_info *ci;
804 unsigned long offset;
805 unsigned long scan_base;
806 unsigned long last_in_cluster = 0;
807 int latency_ration = LATENCY_LIMIT;
808 int n_ret = 0;
809 bool scanned_many = false;
810
811 /*
812 * We try to cluster swap pages by allocating them sequentially
813 * in swap. Once we've allocated SWAPFILE_CLUSTER pages this
814 * way, however, we resort to first-free allocation, starting
815 * a new cluster. This prevents us from scattering swap pages
816 * all over the entire swap partition, so that we reduce
817 * overall disk seek times between swap pages. -- sct
818 * But we do now try to find an empty cluster. -Andrea
819 * And we let swap pages go all over an SSD partition. Hugh
820 */
821
822 si->flags += SWP_SCANNING;
823 /*
824 * Use percpu scan base for SSD to reduce lock contention on
825 * cluster and swap cache. For HDD, sequential access is more
826 * important.
827 */
828 if (si->flags & SWP_SOLIDSTATE)
829 scan_base = this_cpu_read(*si->cluster_next_cpu);
830 else
831 scan_base = si->cluster_next;
832 offset = scan_base;
833
834 /* SSD algorithm */
835 if (si->cluster_info) {
836 if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
837 goto scan;
838 } else if (unlikely(!si->cluster_nr--)) {
839 if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
840 si->cluster_nr = SWAPFILE_CLUSTER - 1;
841 goto checks;
842 }
843
844 spin_unlock(&si->lock);
845
846 /*
847 * If seek is expensive, start searching for new cluster from
848 * start of partition, to minimize the span of allocated swap.
849 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
850 * case, just handled by scan_swap_map_try_ssd_cluster() above.
851 */
852 scan_base = offset = si->lowest_bit;
853 last_in_cluster = offset + SWAPFILE_CLUSTER - 1;
854
855 /* Locate the first empty (unaligned) cluster */
856 for (; last_in_cluster <= si->highest_bit; offset++) {
857 if (si->swap_map[offset])
858 last_in_cluster = offset + SWAPFILE_CLUSTER;
859 else if (offset == last_in_cluster) {
860 spin_lock(&si->lock);
861 offset -= SWAPFILE_CLUSTER - 1;
862 si->cluster_next = offset;
863 si->cluster_nr = SWAPFILE_CLUSTER - 1;
864 goto checks;
865 }
866 if (unlikely(--latency_ration < 0)) {
867 cond_resched();
868 latency_ration = LATENCY_LIMIT;
869 }
870 }
871
872 offset = scan_base;
873 spin_lock(&si->lock);
874 si->cluster_nr = SWAPFILE_CLUSTER - 1;
875 }
876
877 checks:
878 if (si->cluster_info) {
879 while (scan_swap_map_ssd_cluster_conflict(si, offset)) {
880 /* take a break if we already got some slots */
881 if (n_ret)
882 goto done;
883 if (!scan_swap_map_try_ssd_cluster(si, &offset,
884 &scan_base))
885 goto scan;
886 }
887 }
888 if (!(si->flags & SWP_WRITEOK))
889 goto no_page;
890 if (!si->highest_bit)
891 goto no_page;
892 if (offset > si->highest_bit)
893 scan_base = offset = si->lowest_bit;
894
895 ci = lock_cluster(si, offset);
896 /* reuse swap entry of cache-only swap if not busy. */
897 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
898 int swap_was_freed;
899 unlock_cluster(ci);
900 spin_unlock(&si->lock);
901 swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
902 spin_lock(&si->lock);
903 /* entry was freed successfully, try to use this again */
904 if (swap_was_freed)
905 goto checks;
906 goto scan; /* check next one */
907 }
908
909 if (si->swap_map[offset]) {
910 unlock_cluster(ci);
911 if (!n_ret)
912 goto scan;
913 else
914 goto done;
915 }
916 WRITE_ONCE(si->swap_map[offset], usage);
917 inc_cluster_info_page(si, si->cluster_info, offset);
918 unlock_cluster(ci);
919
920 swap_range_alloc(si, offset, 1);
921 slots[n_ret++] = swp_entry(si->type, offset);
922
923 /* got enough slots or reach max slots? */
924 if ((n_ret == nr) || (offset >= si->highest_bit))
925 goto done;
926
927 /* search for next available slot */
928
929 /* time to take a break? */
930 if (unlikely(--latency_ration < 0)) {
931 if (n_ret)
932 goto done;
933 spin_unlock(&si->lock);
934 cond_resched();
935 spin_lock(&si->lock);
936 latency_ration = LATENCY_LIMIT;
937 }
938
939 /* try to get more slots in cluster */
940 if (si->cluster_info) {
941 if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
942 goto checks;
943 } else if (si->cluster_nr && !si->swap_map[++offset]) {
944 /* non-ssd case, still more slots in cluster? */
945 --si->cluster_nr;
946 goto checks;
947 }
948
949 /*
950 * Even if there are no free clusters available (fragmented),
951 * try to scan a little more quickly with the lock held, unless
952 * we have already scanned too many slots.
953 */
954 if (!scanned_many) {
955 unsigned long scan_limit;
956
957 if (offset < scan_base)
958 scan_limit = scan_base;
959 else
960 scan_limit = si->highest_bit;
961 for (; offset <= scan_limit && --latency_ration > 0;
962 offset++) {
963 if (!si->swap_map[offset])
964 goto checks;
965 }
966 }
967
968 done:
969 set_cluster_next(si, offset + 1);
970 si->flags -= SWP_SCANNING;
971 return n_ret;
972
973 scan:
974 spin_unlock(&si->lock);
975 while (++offset <= READ_ONCE(si->highest_bit)) {
976 if (unlikely(--latency_ration < 0)) {
977 cond_resched();
978 latency_ration = LATENCY_LIMIT;
979 scanned_many = true;
980 }
981 if (swap_offset_available_and_locked(si, offset))
982 goto checks;
983 }
984 offset = si->lowest_bit;
985 while (offset < scan_base) {
986 if (unlikely(--latency_ration < 0)) {
987 cond_resched();
988 latency_ration = LATENCY_LIMIT;
989 scanned_many = true;
990 }
991 if (swap_offset_available_and_locked(si, offset))
992 goto checks;
993 offset++;
994 }
995 spin_lock(&si->lock);
996
997 no_page:
998 si->flags -= SWP_SCANNING;
999 return n_ret;
1000 }
1001
1002 static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
1003 {
1004 unsigned long idx;
1005 struct swap_cluster_info *ci;
1006 unsigned long offset;
1007
1008 /*
1009 * Should not even be attempting cluster allocations when huge
1010 * page swap is disabled. Warn and fail the allocation.
1011 */
1012 if (!IS_ENABLED(CONFIG_THP_SWAP)) {
1013 VM_WARN_ON_ONCE(1);
1014 return 0;
1015 }
1016
1017 if (cluster_list_empty(&si->free_clusters))
1018 return 0;
1019
1020 idx = cluster_list_first(&si->free_clusters);
1021 offset = idx * SWAPFILE_CLUSTER;
1022 ci = lock_cluster(si, offset);
1023 alloc_cluster(si, idx);
1024 cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);
1025
1026 memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER);
1027 unlock_cluster(ci);
1028 swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
1029 *slot = swp_entry(si->type, offset);
1030
1031 return 1;
1032 }
1033
1034 static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
1035 {
1036 unsigned long offset = idx * SWAPFILE_CLUSTER;
1037 struct swap_cluster_info *ci;
1038
1039 ci = lock_cluster(si, offset);
1040 memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
1041 cluster_set_count_flag(ci, 0, 0);
1042 free_cluster(si, idx);
1043 unlock_cluster(ci);
1044 swap_range_free(si, offset, SWAPFILE_CLUSTER);
1045 }
1046
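/*
 * Allocate up to @n_goal swap entries into @swp_entries[], trying devices
 * in priority order from this node's avail list. When @entry_size is
 * SWAPFILE_CLUSTER (THP swap), a single whole cluster is allocated
 * instead and at most one entry is returned.
 */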
1047 int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
1048 {
1049 unsigned long size = swap_entry_size(entry_size);
1050 struct swap_info_struct *si, *next;
1051 long avail_pgs;
1052 int n_ret = 0;
1053 int node;
1054
1055 /* Only single cluster request supported */
1056 WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
1057
1058 spin_lock(&swap_avail_lock);
1059
1060 avail_pgs = atomic_long_read(&nr_swap_pages) / size;
1061 if (avail_pgs <= 0) {
1062 spin_unlock(&swap_avail_lock);
1063 goto noswap;
1064 }
1065
1066 n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);
1067
1068 atomic_long_sub(n_goal * size, &nr_swap_pages);
1069
1070 start_over:
1071 node = numa_node_id();
1072 plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
1073 /* requeue si to after same-priority siblings */
1074 plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
1075 spin_unlock(&swap_avail_lock);
1076 spin_lock(&si->lock);
1077 if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
1078 spin_lock(&swap_avail_lock);
1079 if (plist_node_empty(&si->avail_lists[node])) {
1080 spin_unlock(&si->lock);
1081 goto nextsi;
1082 }
1083 WARN(!si->highest_bit,
1084 "swap_info %d in list but !highest_bit\n",
1085 si->type);
1086 WARN(!(si->flags & SWP_WRITEOK),
1087 "swap_info %d in list but !SWP_WRITEOK\n",
1088 si->type);
1089 __del_from_avail_list(si);
1090 spin_unlock(&si->lock);
1091 goto nextsi;
1092 }
1093 if (size == SWAPFILE_CLUSTER) {
1094 if (si->flags & SWP_BLKDEV)
1095 n_ret = swap_alloc_cluster(si, swp_entries);
1096 } else
1097 n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
1098 n_goal, swp_entries);
1099 spin_unlock(&si->lock);
1100 if (n_ret || size == SWAPFILE_CLUSTER)
1101 goto check_out;
1102 pr_debug("scan_swap_map of si %d failed to find offset\n",
1103 si->type);
1104
1105 spin_lock(&swap_avail_lock);
1106 nextsi:
1107 /*
1108 * if we got here, it's likely that si was almost full before,
1109 * and since scan_swap_map_slots() can drop the si->lock,
1110 * multiple callers probably all tried to get a page from the
1111 * same si and it filled up before we could get one; or, the si
1112 * filled up between us dropping swap_avail_lock and taking
1113 * si->lock. Since we dropped the swap_avail_lock, the
1114 * swap_avail_head list may have been modified; so if next is
1115 * still in the swap_avail_head list then try it, otherwise
1116 * start over if we have not gotten any slots.
1117 */
1118 if (plist_node_empty(&next->avail_lists[node]))
1119 goto start_over;
1120 }
1121
1122 spin_unlock(&swap_avail_lock);
1123
1124 check_out:
1125 if (n_ret < n_goal)
1126 atomic_long_add((long)(n_goal - n_ret) * size,
1127 &nr_swap_pages);
1128 noswap:
1129 return n_ret;
1130 }
1131
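/*
 * Validate @entry and return its swap_info_struct without taking any lock;
 * the data_race() checks only filter out obviously bad or already-free
 * entries (with a diagnostic), so callers must tolerate the result
 * becoming stale.
 */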
1132 static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
1133 {
1134 struct swap_info_struct *p;
1135 unsigned long offset;
1136
1137 if (!entry.val)
1138 goto out;
1139 p = swp_swap_info(entry);
1140 if (!p)
1141 goto bad_nofile;
1142 if (data_race(!(p->flags & SWP_USED)))
1143 goto bad_device;
1144 offset = swp_offset(entry);
1145 if (offset >= p->max)
1146 goto bad_offset;
1147 if (data_race(!p->swap_map[swp_offset(entry)]))
1148 goto bad_free;
1149 return p;
1150
1151 bad_free:
1152 pr_err("%s: %s%08lx\n", __func__, Unused_offset, entry.val);
1153 goto out;
1154 bad_offset:
1155 pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
1156 goto out;
1157 bad_device:
1158 pr_err("%s: %s%08lx\n", __func__, Unused_file, entry.val);
1159 goto out;
1160 bad_nofile:
1161 pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1162 out:
1163 return NULL;
1164 }
1165
1166 static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
1167 struct swap_info_struct *q)
1168 {
1169 struct swap_info_struct *p;
1170
1171 p = _swap_info_get(entry);
1172
1173 if (p != q) {
1174 if (q != NULL)
1175 spin_unlock(&q->lock);
1176 if (p != NULL)
1177 spin_lock(&p->lock);
1178 }
1179 return p;
1180 }
1181
1182 static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
1183 unsigned long offset,
1184 unsigned char usage)
1185 {
1186 unsigned char count;
1187 unsigned char has_cache;
1188
1189 count = p->swap_map[offset];
1190
1191 has_cache = count & SWAP_HAS_CACHE;
1192 count &= ~SWAP_HAS_CACHE;
1193
1194 if (usage == SWAP_HAS_CACHE) {
1195 VM_BUG_ON(!has_cache);
1196 has_cache = 0;
1197 } else if (count == SWAP_MAP_SHMEM) {
1198 /*
1199 * Or we could insist on shmem.c using a special
1200 * swap_shmem_free() and free_shmem_swap_and_cache()...
1201 */
1202 count = 0;
1203 } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
1204 if (count == COUNT_CONTINUED) {
1205 if (swap_count_continued(p, offset, count))
1206 count = SWAP_MAP_MAX | COUNT_CONTINUED;
1207 else
1208 count = SWAP_MAP_MAX;
1209 } else
1210 count--;
1211 }
1212
1213 usage = count | has_cache;
1214 if (usage)
1215 WRITE_ONCE(p->swap_map[offset], usage);
1216 else
1217 WRITE_ONCE(p->swap_map[offset], SWAP_HAS_CACHE);
1218
1219 return usage;
1220 }
1221
1222 /*
1223 * Check whether the swap entry is valid in the swap device. If so,
1224 * return a pointer to the swap_info_struct, and keep the swap entry valid
1225 * by preventing the swap device from being swapped off, until
1226 * put_swap_device() is called. Otherwise return NULL.
1227 *
1228 * Notice that swapoff or swapoff+swapon can still happen before the
1229 * percpu_ref_tryget_live() in get_swap_device() or after the
1230 * percpu_ref_put() in put_swap_device() if there isn't any other way
1231 * to prevent swapoff, such as page lock, page table lock, etc. The
1232 * caller must be prepared for that. For example, the following
1233 * situation is possible.
1234 *
1235 * CPU1 CPU2
1236 * do_swap_page()
1237 * ... swapoff+swapon
1238 * __read_swap_cache_async()
1239 * swapcache_prepare()
1240 * __swap_duplicate()
1241 * // check swap_map
1242 * // verify PTE not changed
1243 *
1244 * In __swap_duplicate(), the swap_map needs to be checked before being
1245 * changed, partly because the specified swap entry may be for another
1246 * swap device which has already been swapped off. And in do_swap_page(),
1247 * after the page is read from the swap device, the PTE is verified not to
1248 * have changed, with the page table locked, to check whether the swap
1249 * device has been swapped off, or swapped off and then back on.
1250 */
1251 struct swap_info_struct *get_swap_device(swp_entry_t entry)
1252 {
1253 struct swap_info_struct *si;
1254 unsigned long offset;
1255
1256 if (!entry.val)
1257 goto out;
1258 si = swp_swap_info(entry);
1259 if (!si)
1260 goto bad_nofile;
1261 if (!percpu_ref_tryget_live(&si->users))
1262 goto out;
1263 /*
1264 * Guarantee the si->users are checked before accessing other
1265 * fields of swap_info_struct.
1266 *
1267 * Paired with the spin_unlock() after setup_swap_info() in
1268 * enable_swap_info().
1269 */
1270 smp_rmb();
1271 offset = swp_offset(entry);
1272 if (offset >= si->max)
1273 goto put_out;
1274
1275 return si;
1276 bad_nofile:
1277 pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1278 out:
1279 return NULL;
1280 put_out:
1281 pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
1282 percpu_ref_put(&si->users);
1283 return NULL;
1284 }
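/*
 * A minimal usage sketch (not a verbatim caller): pair get_swap_device()
 * with put_swap_device() around any swap_map access that must not race
 * with swapoff, e.g.
 *
 *	si = get_swap_device(entry);
 *	if (!si)
 *		return 0;	// stale entry or device going away
 *	count = swap_count(si->swap_map[swp_offset(entry)]);
 *	put_swap_device(si);
 */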
1285
1286 static unsigned char __swap_entry_free(struct swap_info_struct *p,
1287 swp_entry_t entry)
1288 {
1289 struct swap_cluster_info *ci;
1290 unsigned long offset = swp_offset(entry);
1291 unsigned char usage;
1292
1293 ci = lock_cluster_or_swap_info(p, offset);
1294 usage = __swap_entry_free_locked(p, offset, 1);
1295 unlock_cluster_or_swap_info(p, ci);
1296 if (!usage)
1297 free_swap_slot(entry);
1298
1299 return usage;
1300 }
1301
1302 static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
1303 {
1304 struct swap_cluster_info *ci;
1305 unsigned long offset = swp_offset(entry);
1306 unsigned char count;
1307
1308 ci = lock_cluster(p, offset);
1309 count = p->swap_map[offset];
1310 VM_BUG_ON(count != SWAP_HAS_CACHE);
1311 p->swap_map[offset] = 0;
1312 dec_cluster_info_page(p, p->cluster_info, offset);
1313 unlock_cluster(ci);
1314
1315 mem_cgroup_uncharge_swap(entry, 1);
1316 swap_range_free(p, offset, 1);
1317 }
1318
1319 /*
1320 * Caller has made sure that the swap device corresponding to entry
1321 * is still around or has not been recycled.
1322 */
1323 void swap_free(swp_entry_t entry)
1324 {
1325 struct swap_info_struct *p;
1326
1327 p = _swap_info_get(entry);
1328 if (p)
1329 __swap_entry_free(p, entry);
1330 }
1331
1332 /*
1333 * Called after dropping swapcache to decrease refcnt to swap entries.
1334 */
1335 void put_swap_folio(struct folio *folio, swp_entry_t entry)
1336 {
1337 unsigned long offset = swp_offset(entry);
1338 unsigned long idx = offset / SWAPFILE_CLUSTER;
1339 struct swap_cluster_info *ci;
1340 struct swap_info_struct *si;
1341 unsigned char *map;
1342 unsigned int i, free_entries = 0;
1343 unsigned char val;
1344 int size = swap_entry_size(folio_nr_pages(folio));
1345
1346 si = _swap_info_get(entry);
1347 if (!si)
1348 return;
1349
1350 ci = lock_cluster_or_swap_info(si, offset);
1351 if (size == SWAPFILE_CLUSTER) {
1352 VM_BUG_ON(!cluster_is_huge(ci));
1353 map = si->swap_map + offset;
1354 for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1355 val = map[i];
1356 VM_BUG_ON(!(val & SWAP_HAS_CACHE));
1357 if (val == SWAP_HAS_CACHE)
1358 free_entries++;
1359 }
1360 cluster_clear_huge(ci);
1361 if (free_entries == SWAPFILE_CLUSTER) {
1362 unlock_cluster_or_swap_info(si, ci);
1363 spin_lock(&si->lock);
1364 mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
1365 swap_free_cluster(si, idx);
1366 spin_unlock(&si->lock);
1367 return;
1368 }
1369 }
1370 for (i = 0; i < size; i++, entry.val++) {
1371 if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
1372 unlock_cluster_or_swap_info(si, ci);
1373 free_swap_slot(entry);
1374 if (i == size - 1)
1375 return;
1376 lock_cluster_or_swap_info(si, offset);
1377 }
1378 }
1379 unlock_cluster_or_swap_info(si, ci);
1380 }
1381
1382 #ifdef CONFIG_THP_SWAP
1383 int split_swap_cluster(swp_entry_t entry)
1384 {
1385 struct swap_info_struct *si;
1386 struct swap_cluster_info *ci;
1387 unsigned long offset = swp_offset(entry);
1388
1389 si = _swap_info_get(entry);
1390 if (!si)
1391 return -EBUSY;
1392 ci = lock_cluster(si, offset);
1393 cluster_clear_huge(ci);
1394 unlock_cluster(ci);
1395 return 0;
1396 }
1397 #endif
1398
1399 static int swp_entry_cmp(const void *ent1, const void *ent2)
1400 {
1401 const swp_entry_t *e1 = ent1, *e2 = ent2;
1402
1403 return (int)swp_type(*e1) - (int)swp_type(*e2);
1404 }
1405
1406 void swapcache_free_entries(swp_entry_t *entries, int n)
1407 {
1408 struct swap_info_struct *p, *prev;
1409 int i;
1410
1411 if (n <= 0)
1412 return;
1413
1414 prev = NULL;
1415 p = NULL;
1416
1417 /*
1418 * Sort swap entries by swap device, so each lock is only taken once.
1419 * nr_swapfiles isn't absolutely correct, but the overhead of sort() is
1420 * so low that it isn't necessary to optimize further.
1421 */
1422 if (nr_swapfiles > 1)
1423 sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL);
1424 for (i = 0; i < n; ++i) {
1425 p = swap_info_get_cont(entries[i], prev);
1426 if (p)
1427 swap_entry_free(p, entries[i]);
1428 prev = p;
1429 }
1430 if (p)
1431 spin_unlock(&p->lock);
1432 }
1433
1434 int __swap_count(swp_entry_t entry)
1435 {
1436 struct swap_info_struct *si;
1437 pgoff_t offset = swp_offset(entry);
1438 int count = 0;
1439
1440 si = get_swap_device(entry);
1441 if (si) {
1442 count = swap_count(si->swap_map[offset]);
1443 put_swap_device(si);
1444 }
1445 return count;
1446 }
1447
1448 /*
1449 * How many references to @entry are currently swapped out?
1450 * This does not give an exact answer when swap count is continued,
1451 * but does include the high COUNT_CONTINUED flag to allow for that.
1452 */
1453 static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
1454 {
1455 pgoff_t offset = swp_offset(entry);
1456 struct swap_cluster_info *ci;
1457 int count;
1458
1459 ci = lock_cluster_or_swap_info(si, offset);
1460 count = swap_count(si->swap_map[offset]);
1461 unlock_cluster_or_swap_info(si, ci);
1462 return count;
1463 }
1464
1465 /*
1466 * How many references to @entry are currently swapped out?
1467 * This does not give an exact answer when swap count is continued,
1468 * but does include the high COUNT_CONTINUED flag to allow for that.
1469 */
1470 int __swp_swapcount(swp_entry_t entry)
1471 {
1472 int count = 0;
1473 struct swap_info_struct *si;
1474
1475 si = get_swap_device(entry);
1476 if (si) {
1477 count = swap_swapcount(si, entry);
1478 put_swap_device(si);
1479 }
1480 return count;
1481 }
1482
1483 /*
1484 * How many references to @entry are currently swapped out?
1485 * This considers COUNT_CONTINUED so it returns an exact answer.
1486 */
1487 int swp_swapcount(swp_entry_t entry)
1488 {
1489 int count, tmp_count, n;
1490 struct swap_info_struct *p;
1491 struct swap_cluster_info *ci;
1492 struct page *page;
1493 pgoff_t offset;
1494 unsigned char *map;
1495
1496 p = _swap_info_get(entry);
1497 if (!p)
1498 return 0;
1499
1500 offset = swp_offset(entry);
1501
1502 ci = lock_cluster_or_swap_info(p, offset);
1503
1504 count = swap_count(p->swap_map[offset]);
1505 if (!(count & COUNT_CONTINUED))
1506 goto out;
1507
1508 count &= ~COUNT_CONTINUED;
1509 n = SWAP_MAP_MAX + 1;
1510
1511 page = vmalloc_to_page(p->swap_map + offset);
1512 offset &= ~PAGE_MASK;
1513 VM_BUG_ON(page_private(page) != SWP_CONTINUED);
1514
1515 do {
1516 page = list_next_entry(page, lru);
1517 map = kmap_atomic(page);
1518 tmp_count = map[offset];
1519 kunmap_atomic(map);
1520
1521 count += (tmp_count & ~COUNT_CONTINUED) * n;
1522 n *= (SWAP_CONT_MAX + 1);
1523 } while (tmp_count & COUNT_CONTINUED);
1524 out:
1525 unlock_cluster_or_swap_info(p, ci);
1526 return count;
1527 }
1528
1529 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
1530 swp_entry_t entry)
1531 {
1532 struct swap_cluster_info *ci;
1533 unsigned char *map = si->swap_map;
1534 unsigned long roffset = swp_offset(entry);
1535 unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER);
1536 int i;
1537 bool ret = false;
1538
1539 ci = lock_cluster_or_swap_info(si, offset);
1540 if (!ci || !cluster_is_huge(ci)) {
1541 if (swap_count(map[roffset]))
1542 ret = true;
1543 goto unlock_out;
1544 }
1545 for (i = 0; i < SWAPFILE_CLUSTER; i++) {
1546 if (swap_count(map[offset + i])) {
1547 ret = true;
1548 break;
1549 }
1550 }
1551 unlock_out:
1552 unlock_cluster_or_swap_info(si, ci);
1553 return ret;
1554 }
1555
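/*
 * Return true if the folio's swap entry (or, for a large folio, any entry
 * in its swap cluster) still has references swapped out.
 */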
1556 static bool folio_swapped(struct folio *folio)
1557 {
1558 swp_entry_t entry = folio_swap_entry(folio);
1559 struct swap_info_struct *si = _swap_info_get(entry);
1560
1561 if (!si)
1562 return false;
1563
1564 if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio)))
1565 return swap_swapcount(si, entry) != 0;
1566
1567 return swap_page_trans_huge_swapped(si, entry);
1568 }
1569
1570 /**
1571 * folio_free_swap() - Free the swap space used for this folio.
1572 * @folio: The folio to remove.
1573 *
1574 * If swap is getting full, or if there are no more mappings of this folio,
1575 * then call folio_free_swap to free its swap space.
1576 *
1577 * Return: true if we were able to release the swap space.
1578 */
1579 bool folio_free_swap(struct folio *folio)
1580 {
1581 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1582
1583 if (!folio_test_swapcache(folio))
1584 return false;
1585 if (folio_test_writeback(folio))
1586 return false;
1587 if (folio_swapped(folio))
1588 return false;
1589
1590 /*
1591 * Once hibernation has begun to create its image of memory,
1592 * there's a danger that one of the calls to folio_free_swap()
1593 * - most probably a call from __try_to_reclaim_swap() while
1594 * hibernation is allocating its own swap pages for the image,
1595 * but conceivably even a call from memory reclaim - will free
1596 * the swap from a folio which has already been recorded in the
1597 * image as a clean swapcache folio, and then reuse its swap for
1598 * another page of the image. On waking from hibernation, the
1599 * original folio might be freed under memory pressure, then
1600 * later read back in from swap, now with the wrong data.
1601 *
1602 * Hibernation suspends storage while it is writing the image
1603 * to disk so check that here.
1604 */
1605 if (pm_suspended_storage())
1606 return false;
1607
1608 delete_from_swap_cache(folio);
1609 folio_set_dirty(folio);
1610 return true;
1611 }
1612
1613 /*
1614 * Free the swap entry like above, but also try to
1615 * free the page cache entry if it is the last user.
1616 */
1617 int free_swap_and_cache(swp_entry_t entry)
1618 {
1619 struct swap_info_struct *p;
1620 unsigned char count;
1621
1622 if (non_swap_entry(entry))
1623 return 1;
1624
1625 p = _swap_info_get(entry);
1626 if (p) {
1627 count = __swap_entry_free(p, entry);
1628 if (count == SWAP_HAS_CACHE &&
1629 !swap_page_trans_huge_swapped(p, entry))
1630 __try_to_reclaim_swap(p, swp_offset(entry),
1631 TTRS_UNMAPPED | TTRS_FULL);
1632 }
1633 return p != NULL;
1634 }
1635
1636 #ifdef CONFIG_HIBERNATION
1637
1638 swp_entry_t get_swap_page_of_type(int type)
1639 {
1640 struct swap_info_struct *si = swap_type_to_swap_info(type);
1641 swp_entry_t entry = {0};
1642
1643 if (!si)
1644 goto fail;
1645
1646 /* This is called for allocating swap entry, not cache */
1647 spin_lock(&si->lock);
1648 if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry))
1649 atomic_long_dec(&nr_swap_pages);
1650 spin_unlock(&si->lock);
1651 fail:
1652 return entry;
1653 }
1654
1655 /*
1656 * Find the swap type that corresponds to given device (if any).
1657 *
1658 * @offset - number of the PAGE_SIZE-sized block of the device, starting
1659 * from 0, in which the swap header is expected to be located.
1660 *
1661 * This is needed for the suspend to disk (aka swsusp).
1662 */
1663 int swap_type_of(dev_t device, sector_t offset)
1664 {
1665 int type;
1666
1667 if (!device)
1668 return -1;
1669
1670 spin_lock(&swap_lock);
1671 for (type = 0; type < nr_swapfiles; type++) {
1672 struct swap_info_struct *sis = swap_info[type];
1673
1674 if (!(sis->flags & SWP_WRITEOK))
1675 continue;
1676
1677 if (device == sis->bdev->bd_dev) {
1678 struct swap_extent *se = first_se(sis);
1679
1680 if (se->start_block == offset) {
1681 spin_unlock(&swap_lock);
1682 return type;
1683 }
1684 }
1685 }
1686 spin_unlock(&swap_lock);
1687 return -ENODEV;
1688 }
1689
1690 int find_first_swap(dev_t *device)
1691 {
1692 int type;
1693
1694 spin_lock(&swap_lock);
1695 for (type = 0; type < nr_swapfiles; type++) {
1696 struct swap_info_struct *sis = swap_info[type];
1697
1698 if (!(sis->flags & SWP_WRITEOK))
1699 continue;
1700 *device = sis->bdev->bd_dev;
1701 spin_unlock(&swap_lock);
1702 return type;
1703 }
1704 spin_unlock(&swap_lock);
1705 return -ENODEV;
1706 }
1707
1708 /*
1709 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
1710 * corresponding to given index in swap_info (swap type).
1711 */
1712 sector_t swapdev_block(int type, pgoff_t offset)
1713 {
1714 struct swap_info_struct *si = swap_type_to_swap_info(type);
1715 struct swap_extent *se;
1716
1717 if (!si || !(si->flags & SWP_WRITEOK))
1718 return 0;
1719 se = offset_to_swap_extent(si, offset);
1720 return se->start_block + (offset - se->start_page);
1721 }
1722
1723 /*
1724 * Return either the total number of swap pages of given type, or the number
1725 * of free pages of that type (depending on @free)
1726 *
1727 * This is needed for software suspend
1728 */
1729 unsigned int count_swap_pages(int type, int free)
1730 {
1731 unsigned int n = 0;
1732
1733 spin_lock(&swap_lock);
1734 if ((unsigned int)type < nr_swapfiles) {
1735 struct swap_info_struct *sis = swap_info[type];
1736
1737 spin_lock(&sis->lock);
1738 if (sis->flags & SWP_WRITEOK) {
1739 n = sis->pages;
1740 if (free)
1741 n -= sis->inuse_pages;
1742 }
1743 spin_unlock(&sis->lock);
1744 }
1745 spin_unlock(&swap_lock);
1746 return n;
1747 }
1748 #endif /* CONFIG_HIBERNATION */
1749
1750 static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
1751 {
1752 return pte_same(pte_swp_clear_flags(pte), swp_pte);
1753 }
1754
1755 /*
1756 * No need to decide whether this PTE shares the swap entry with others,
1757 * just let do_wp_page work it out if a write is requested later - to
1758 * force COW, vm_page_prot omits write permission from any private vma.
1759 */
1760 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
1761 unsigned long addr, swp_entry_t entry, struct folio *folio)
1762 {
1763 struct page *page = folio_file_page(folio, swp_offset(entry));
1764 struct page *swapcache;
1765 spinlock_t *ptl;
1766 pte_t *pte, new_pte;
1767 int ret = 1;
1768
1769 swapcache = page;
1770 page = ksm_might_need_to_copy(page, vma, addr);
1771 if (unlikely(!page))
1772 return -ENOMEM;
1773
1774 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1775 if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
1776 ret = 0;
1777 goto out;
1778 }
1779
1780 if (unlikely(!PageUptodate(page))) {
1781 pte_t pteval;
1782
1783 dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1784 pteval = swp_entry_to_pte(make_swapin_error_entry(page));
1785 set_pte_at(vma->vm_mm, addr, pte, pteval);
1786 swap_free(entry);
1787 ret = 0;
1788 goto out;
1789 }
1790
1791 /* See do_swap_page() */
1792 BUG_ON(!PageAnon(page) && PageMappedToDisk(page));
1793 BUG_ON(PageAnon(page) && PageAnonExclusive(page));
1794
1795 dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
1796 inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
1797 get_page(page);
1798 if (page == swapcache) {
1799 rmap_t rmap_flags = RMAP_NONE;
1800
1801 /*
1802 * See do_swap_page(): PageWriteback() would be problematic.
1803 * However, we do a wait_on_page_writeback() just before this
1804 * call and have the page locked.
1805 */
1806 VM_BUG_ON_PAGE(PageWriteback(page), page);
1807 if (pte_swp_exclusive(*pte))
1808 rmap_flags |= RMAP_EXCLUSIVE;
1809
1810 page_add_anon_rmap(page, vma, addr, rmap_flags);
1811 } else { /* ksm created a completely new copy */
1812 page_add_new_anon_rmap(page, vma, addr);
1813 lru_cache_add_inactive_or_unevictable(page, vma);
1814 }
1815 new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
1816 if (pte_swp_soft_dirty(*pte))
1817 new_pte = pte_mksoft_dirty(new_pte);
1818 if (pte_swp_uffd_wp(*pte))
1819 new_pte = pte_mkuffd_wp(new_pte);
1820 set_pte_at(vma->vm_mm, addr, pte, new_pte);
1821 swap_free(entry);
1822 out:
1823 pte_unmap_unlock(pte, ptl);
1824 if (page != swapcache) {
1825 unlock_page(page);
1826 put_page(page);
1827 }
1828 return ret;
1829 }
1830
1831 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
1832 unsigned long addr, unsigned long end,
1833 unsigned int type)
1834 {
1835 swp_entry_t entry;
1836 pte_t *pte;
1837 struct swap_info_struct *si;
1838 int ret = 0;
1839 volatile unsigned char *swap_map;
1840
1841 si = swap_info[type];
1842 pte = pte_offset_map(pmd, addr);
1843 do {
1844 struct folio *folio;
1845 unsigned long offset;
1846
1847 if (!is_swap_pte(*pte))
1848 continue;
1849
1850 entry = pte_to_swp_entry(*pte);
1851 if (swp_type(entry) != type)
1852 continue;
1853
1854 offset = swp_offset(entry);
1855 pte_unmap(pte);
1856 swap_map = &si->swap_map[offset];
1857 folio = swap_cache_get_folio(entry, vma, addr);
1858 if (!folio) {
1859 struct page *page;
1860 struct vm_fault vmf = {
1861 .vma = vma,
1862 .address = addr,
1863 .real_address = addr,
1864 .pmd = pmd,
1865 };
1866
1867 page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
1868 &vmf);
1869 if (page)
1870 folio = page_folio(page);
1871 }
1872 if (!folio) {
1873 if (*swap_map == 0 || *swap_map == SWAP_MAP_BAD)
1874 goto try_next;
1875 return -ENOMEM;
1876 }
1877
1878 folio_lock(folio);
1879 folio_wait_writeback(folio);
1880 ret = unuse_pte(vma, pmd, addr, entry, folio);
1881 if (ret < 0) {
1882 folio_unlock(folio);
1883 folio_put(folio);
1884 goto out;
1885 }
1886
1887 folio_free_swap(folio);
1888 folio_unlock(folio);
1889 folio_put(folio);
1890 try_next:
1891 pte = pte_offset_map(pmd, addr);
1892 } while (pte++, addr += PAGE_SIZE, addr != end);
1893 pte_unmap(pte - 1);
1894
1895 ret = 0;
1896 out:
1897 return ret;
1898 }
1899
1900 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
1901 unsigned long addr, unsigned long end,
1902 unsigned int type)
1903 {
1904 pmd_t *pmd;
1905 unsigned long next;
1906 int ret;
1907
1908 pmd = pmd_offset(pud, addr);
1909 do {
1910 cond_resched();
1911 next = pmd_addr_end(addr, end);
1912 if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1913 continue;
1914 ret = unuse_pte_range(vma, pmd, addr, next, type);
1915 if (ret)
1916 return ret;
1917 } while (pmd++, addr = next, addr != end);
1918 return 0;
1919 }
1920
1921 static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
1922 unsigned long addr, unsigned long end,
1923 unsigned int type)
1924 {
1925 pud_t *pud;
1926 unsigned long next;
1927 int ret;
1928
1929 pud = pud_offset(p4d, addr);
1930 do {
1931 next = pud_addr_end(addr, end);
1932 if (pud_none_or_clear_bad(pud))
1933 continue;
1934 ret = unuse_pmd_range(vma, pud, addr, next, type);
1935 if (ret)
1936 return ret;
1937 } while (pud++, addr = next, addr != end);
1938 return 0;
1939 }
1940
1941 static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
1942 unsigned long addr, unsigned long end,
1943 unsigned int type)
1944 {
1945 p4d_t *p4d;
1946 unsigned long next;
1947 int ret;
1948
1949 p4d = p4d_offset(pgd, addr);
1950 do {
1951 next = p4d_addr_end(addr, end);
1952 if (p4d_none_or_clear_bad(p4d))
1953 continue;
1954 ret = unuse_pud_range(vma, p4d, addr, next, type);
1955 if (ret)
1956 return ret;
1957 } while (p4d++, addr = next, addr != end);
1958 return 0;
1959 }
1960
1961 static int unuse_vma(struct vm_area_struct *vma, unsigned int type)
1962 {
1963 pgd_t *pgd;
1964 unsigned long addr, end, next;
1965 int ret;
1966
1967 addr = vma->vm_start;
1968 end = vma->vm_end;
1969
1970 pgd = pgd_offset(vma->vm_mm, addr);
1971 do {
1972 next = pgd_addr_end(addr, end);
1973 if (pgd_none_or_clear_bad(pgd))
1974 continue;
1975 ret = unuse_p4d_range(vma, pgd, addr, next, type);
1976 if (ret)
1977 return ret;
1978 } while (pgd++, addr = next, addr != end);
1979 return 0;
1980 }
1981
1982 static int unuse_mm(struct mm_struct *mm, unsigned int type)
1983 {
1984 struct vm_area_struct *vma;
1985 int ret = 0;
1986 VMA_ITERATOR(vmi, mm, 0);
1987
1988 mmap_read_lock(mm);
1989 for_each_vma(vmi, vma) {
1990 if (vma->anon_vma) {
1991 ret = unuse_vma(vma, type);
1992 if (ret)
1993 break;
1994 }
1995
1996 cond_resched();
1997 }
1998 mmap_read_unlock(mm);
1999 return ret;
2000 }
2001
2002 /*
2003 * Scan swap_map from current position to next entry still in use.
2004 * Return 0 if there are no inuse entries after prev till end of
2005 * the map.
2006 */
2007 static unsigned int find_next_to_unuse(struct swap_info_struct *si,
2008 unsigned int prev)
2009 {
2010 unsigned int i;
2011 unsigned char count;
2012
2013 /*
2014 * No need for swap_lock here: we're just looking
2015 * for whether an entry is in use, not modifying it; false
2016 * hits are okay, and sys_swapoff() has already prevented new
2017 * allocations from this area (while holding swap_lock).
2018 */
2019 for (i = prev + 1; i < si->max; i++) {
2020 count = READ_ONCE(si->swap_map[i]);
2021 if (count && swap_count(count) != SWAP_MAP_BAD)
2022 break;
2023 if ((i % LATENCY_LIMIT) == 0)
2024 cond_resched();
2025 }
2026
2027 if (i == si->max)
2028 i = 0;
2029
2030 return i;
2031 }
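
/*
 * Illustrative sketch only (guarded out, not kernel code): a user-space model
 * of the scan above.  It walks a small swap_map-like array from prev + 1,
 * returns the first index whose count is in use (non-zero and not "bad"),
 * and returns 0 when nothing in use remains - the same convention the caller
 * in try_to_unuse() relies on.  Array contents are made-up assumptions.
 */
#if 0
#include <stdio.h>

#define DEMO_MAP_BAD 0x3f	/* stand-in for SWAP_MAP_BAD */

static unsigned int demo_find_next(const unsigned char *map,
				   unsigned int max, unsigned int prev)
{
	unsigned int i;

	for (i = prev + 1; i < max; i++) {
		if (map[i] && map[i] != DEMO_MAP_BAD)
			return i;
	}
	return 0;	/* nothing left in use after prev */
}

int main(void)
{
	/* slot 0 is the header, slot 3 is bad, slots 2 and 5 are in use */
	unsigned char map[8] = { DEMO_MAP_BAD, 0, 1, DEMO_MAP_BAD, 0, 2, 0, 0 };
	unsigned int i = 0;

	while ((i = demo_find_next(map, 8, i)) != 0)
		printf("entry %u still in use (count %u)\n", i, (unsigned)map[i]);
	return 0;
}
#endif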
2032
2033 static int try_to_unuse(unsigned int type)
2034 {
2035 struct mm_struct *prev_mm;
2036 struct mm_struct *mm;
2037 struct list_head *p;
2038 int retval = 0;
2039 struct swap_info_struct *si = swap_info[type];
2040 struct folio *folio;
2041 swp_entry_t entry;
2042 unsigned int i;
2043
2044 if (!READ_ONCE(si->inuse_pages))
2045 return 0;
2046
2047 retry:
2048 retval = shmem_unuse(type);
2049 if (retval)
2050 return retval;
2051
2052 prev_mm = &init_mm;
2053 mmget(prev_mm);
2054
2055 spin_lock(&mmlist_lock);
2056 p = &init_mm.mmlist;
2057 while (READ_ONCE(si->inuse_pages) &&
2058 !signal_pending(current) &&
2059 (p = p->next) != &init_mm.mmlist) {
2060
2061 mm = list_entry(p, struct mm_struct, mmlist);
2062 if (!mmget_not_zero(mm))
2063 continue;
2064 spin_unlock(&mmlist_lock);
2065 mmput(prev_mm);
2066 prev_mm = mm;
2067 retval = unuse_mm(mm, type);
2068 if (retval) {
2069 mmput(prev_mm);
2070 return retval;
2071 }
2072
2073 /*
2074 * Make sure that we aren't completely killing
2075 * interactive performance.
2076 */
2077 cond_resched();
2078 spin_lock(&mmlist_lock);
2079 }
2080 spin_unlock(&mmlist_lock);
2081
2082 mmput(prev_mm);
2083
2084 i = 0;
2085 while (READ_ONCE(si->inuse_pages) &&
2086 !signal_pending(current) &&
2087 (i = find_next_to_unuse(si, i)) != 0) {
2088
2089 entry = swp_entry(type, i);
2090 folio = filemap_get_folio(swap_address_space(entry), i);
2091 if (!folio)
2092 continue;
2093
2094 /*
2095 * It is conceivable that a racing task removed this folio from
2096 * swap cache just before we acquired the page lock. The folio
2097 * might even be back in swap cache on another swap area. But
2098 * that is okay, folio_free_swap() only removes stale folios.
2099 */
2100 folio_lock(folio);
2101 folio_wait_writeback(folio);
2102 folio_free_swap(folio);
2103 folio_unlock(folio);
2104 folio_put(folio);
2105 }
2106
2107 /*
2108 * Let's check again to see if there are still swap entries in the map.
2109 * If yes, we need to retry the unuse logic again.
2110 * Under global memory pressure, swap entries can be reinserted back
2111 * into process space after the mmlist loop above passes over them.
2112 *
2113 * Limit the number of retries? No: when mmget_not_zero()
2114 * above fails, that mm is likely to be freeing swap from
2115 * exit_mmap(), which proceeds at its own independent pace;
2116 * and even shmem_writepage() could have been preempted after
2117 * folio_alloc_swap(), temporarily hiding that swap. It's easy
2118 * and robust (though cpu-intensive) just to keep retrying.
2119 */
2120 if (READ_ONCE(si->inuse_pages)) {
2121 if (!signal_pending(current))
2122 goto retry;
2123 return -EINTR;
2124 }
2125
2126 return 0;
2127 }
2128
2129 /*
2130 * After a successful try_to_unuse, if no swap is now in use, we know
2131 * we can empty the mmlist. swap_lock must be held on entry and exit.
2132 * Note that mmlist_lock nests inside swap_lock, and an mm must be
2133 * added to the mmlist just after page_duplicate - before would be racy.
2134 */
2135 static void drain_mmlist(void)
2136 {
2137 struct list_head *p, *next;
2138 unsigned int type;
2139
2140 for (type = 0; type < nr_swapfiles; type++)
2141 if (swap_info[type]->inuse_pages)
2142 return;
2143 spin_lock(&mmlist_lock);
2144 list_for_each_safe(p, next, &init_mm.mmlist)
2145 list_del_init(p);
2146 spin_unlock(&mmlist_lock);
2147 }
2148
2149 /*
2150 * Free all of a swapdev's extent information
2151 */
2152 static void destroy_swap_extents(struct swap_info_struct *sis)
2153 {
2154 while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
2155 struct rb_node *rb = sis->swap_extent_root.rb_node;
2156 struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
2157
2158 rb_erase(rb, &sis->swap_extent_root);
2159 kfree(se);
2160 }
2161
2162 if (sis->flags & SWP_ACTIVATED) {
2163 struct file *swap_file = sis->swap_file;
2164 struct address_space *mapping = swap_file->f_mapping;
2165
2166 sis->flags &= ~SWP_ACTIVATED;
2167 if (mapping->a_ops->swap_deactivate)
2168 mapping->a_ops->swap_deactivate(swap_file);
2169 }
2170 }
2171
2172 /*
2173 * Add a block range (and the corresponding page range) into this swapdev's
2174 * extent tree.
2175 *
2176 * This function rather assumes that it is called in ascending page order.
2177 */
2178 int
2179 add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
2180 unsigned long nr_pages, sector_t start_block)
2181 {
2182 struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL;
2183 struct swap_extent *se;
2184 struct swap_extent *new_se;
2185
2186 /*
2187 * place the new node at the rightmost position since the
2188 * function is called in ascending page order.
2189 */
2190 while (*link) {
2191 parent = *link;
2192 link = &parent->rb_right;
2193 }
2194
2195 if (parent) {
2196 se = rb_entry(parent, struct swap_extent, rb_node);
2197 BUG_ON(se->start_page + se->nr_pages != start_page);
2198 if (se->start_block + se->nr_pages == start_block) {
2199 /* Merge it */
2200 se->nr_pages += nr_pages;
2201 return 0;
2202 }
2203 }
2204
2205 /* No merge, insert a new extent. */
2206 new_se = kmalloc(sizeof(*se), GFP_KERNEL);
2207 if (new_se == NULL)
2208 return -ENOMEM;
2209 new_se->start_page = start_page;
2210 new_se->nr_pages = nr_pages;
2211 new_se->start_block = start_block;
2212
2213 rb_link_node(&new_se->rb_node, parent, link);
2214 rb_insert_color(&new_se->rb_node, &sis->swap_extent_root);
2215 return 1;
2216 }
2217 EXPORT_SYMBOL_GPL(add_swap_extent);
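
/*
 * Illustrative sketch only (guarded out, not kernel code): a user-space model
 * of the merge-or-append behaviour of add_swap_extent() above.  Because runs
 * arrive in ascending page order, a new run either extends the last extent
 * (when its blocks continue where the previous one ended) or starts a new
 * extent.  A plain array stands in for the rbtree; the runs are made up.
 */
#if 0
#include <stdio.h>

struct demo_extent {
	unsigned long start_page;
	unsigned long nr_pages;
	unsigned long long start_block;
};

static int demo_add(struct demo_extent *ext, int n,
		    unsigned long start_page, unsigned long nr_pages,
		    unsigned long long start_block)
{
	if (n && ext[n - 1].start_block + ext[n - 1].nr_pages == start_block) {
		ext[n - 1].nr_pages += nr_pages;	/* merge with last extent */
		return n;
	}
	ext[n].start_page = start_page;
	ext[n].nr_pages = nr_pages;
	ext[n].start_block = start_block;
	return n + 1;
}

int main(void)
{
	struct demo_extent ext[4];
	int i, n = 0;

	n = demo_add(ext, n, 0, 8, 1000);	/* new extent */
	n = demo_add(ext, n, 8, 8, 1008);	/* contiguous on disk: merged */
	n = demo_add(ext, n, 16, 8, 5000);	/* gap on disk: new extent */

	for (i = 0; i < n; i++)
		printf("pages %lu+%lu -> block %llu\n", ext[i].start_page,
		       ext[i].nr_pages, ext[i].start_block);
	return 0;
}
#endif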
2218
2219 /*
2220 * A `swap extent' is a simple thing which maps a contiguous range of pages
2221 * onto a contiguous range of disk blocks. A rbtree of swap extents is
2222 * built at swapon time and is then used at swap_writepage/swap_readpage
2223 * time for locating where on disk a page belongs.
2224 *
2225 * If the swapfile is an S_ISBLK block device, a single extent is installed.
2226 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
2227 * swap files identically.
2228 *
2229 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
2230 * extent rbtree operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK
2231 * swapfiles are handled *identically* after swapon time.
2232 *
2233 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
2234 * and will parse them into a rbtree, in PAGE_SIZE chunks. If some stray
2235 * blocks are found which do not fall within the PAGE_SIZE alignment
2236 * requirements, they are simply tossed out - we will never use those blocks
2237 * for swapping.
2238 *
2239 * For all swap devices we set S_SWAPFILE across the life of the swapon. This
2240 * prevents users from writing to the swap device, which will corrupt memory.
2241 *
2242 * The amount of disk space which a single swap extent represents varies.
2243 * Typically it is in the 1-4 megabyte range. So we can have hundreds of
2244 * extents in the rbtree. - akpm.
2245 */
2246 static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
2247 {
2248 struct file *swap_file = sis->swap_file;
2249 struct address_space *mapping = swap_file->f_mapping;
2250 struct inode *inode = mapping->host;
2251 int ret;
2252
2253 if (S_ISBLK(inode->i_mode)) {
2254 ret = add_swap_extent(sis, 0, sis->max, 0);
2255 *span = sis->pages;
2256 return ret;
2257 }
2258
2259 if (mapping->a_ops->swap_activate) {
2260 ret = mapping->a_ops->swap_activate(sis, swap_file, span);
2261 if (ret < 0)
2262 return ret;
2263 sis->flags |= SWP_ACTIVATED;
2264 if ((sis->flags & SWP_FS_OPS) &&
2265 sio_pool_init() != 0) {
2266 destroy_swap_extents(sis);
2267 return -ENOMEM;
2268 }
2269 return ret;
2270 }
2271
2272 return generic_swapfile_activate(sis, swap_file, span);
2273 }
2274
2275 static int swap_node(struct swap_info_struct *p)
2276 {
2277 struct block_device *bdev;
2278
2279 if (p->bdev)
2280 bdev = p->bdev;
2281 else
2282 bdev = p->swap_file->f_inode->i_sb->s_bdev;
2283
2284 return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
2285 }
2286
2287 static void setup_swap_info(struct swap_info_struct *p, int prio,
2288 unsigned char *swap_map,
2289 struct swap_cluster_info *cluster_info)
2290 {
2291 int i;
2292
2293 if (prio >= 0)
2294 p->prio = prio;
2295 else
2296 p->prio = --least_priority;
2297 /*
2298 * the plist prio is negated because plist ordering is
2299 * low-to-high, while swap ordering is high-to-low
2300 */
2301 p->list.prio = -p->prio;
2302 for_each_node(i) {
2303 if (p->prio >= 0)
2304 p->avail_lists[i].prio = -p->prio;
2305 else {
2306 if (swap_node(p) == i)
2307 p->avail_lists[i].prio = 1;
2308 else
2309 p->avail_lists[i].prio = -p->prio;
2310 }
2311 }
2312 p->swap_map = swap_map;
2313 p->cluster_info = cluster_info;
2314 }
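
/*
 * Illustrative sketch only (guarded out, not kernel code): why the plist
 * priority is stored negated in setup_swap_info() above.  A plist keeps the
 * smallest key first, but swap allocation wants the highest-priority device
 * first, so storing -prio makes the plist order match the swap order.  The
 * device names and priorities below are made-up assumptions.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct demo_si {
	const char *name;
	int prio;		/* swap priority, higher = preferred */
	int list_prio;		/* key actually stored in the plist */
};

static int by_list_prio(const void *a, const void *b)
{
	/* a plist keeps the smallest key first */
	return ((const struct demo_si *)a)->list_prio -
	       ((const struct demo_si *)b)->list_prio;
}

int main(void)
{
	struct demo_si si[] = {
		{ "swap-a", 5, -5 }, { "swap-b", 10, -10 }, { "swap-c", -2, 2 },
	};
	unsigned int i;

	qsort(si, 3, sizeof(si[0]), by_list_prio);
	for (i = 0; i < 3; i++)
		printf("%s (prio %d)\n", si[i].name, si[i].prio);
	/* prints swap-b, swap-a, swap-c: highest swap priority first */
	return 0;
}
#endif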
2315
2316 static void _enable_swap_info(struct swap_info_struct *p)
2317 {
2318 p->flags |= SWP_WRITEOK;
2319 atomic_long_add(p->pages, &nr_swap_pages);
2320 total_swap_pages += p->pages;
2321
2322 assert_spin_locked(&swap_lock);
2323 /*
2324 * both lists are plists, and thus priority ordered.
2325 * swap_active_head needs to be priority ordered for swapoff(),
2326 * which on removal of any swap_info_struct with an auto-assigned
2327 * (i.e. negative) priority increments the auto-assigned priority
2328 * of any lower-priority swap_info_structs.
2329 * swap_avail_head needs to be priority ordered for folio_alloc_swap(),
2330 * which allocates swap pages from the highest available priority
2331 * swap_info_struct.
2332 */
2333 plist_add(&p->list, &swap_active_head);
2334 add_to_avail_list(p);
2335 }
2336
2337 static void enable_swap_info(struct swap_info_struct *p, int prio,
2338 unsigned char *swap_map,
2339 struct swap_cluster_info *cluster_info,
2340 unsigned long *frontswap_map)
2341 {
2342 if (IS_ENABLED(CONFIG_FRONTSWAP))
2343 frontswap_init(p->type, frontswap_map);
2344 spin_lock(&swap_lock);
2345 spin_lock(&p->lock);
2346 setup_swap_info(p, prio, swap_map, cluster_info);
2347 spin_unlock(&p->lock);
2348 spin_unlock(&swap_lock);
2349 /*
2350 * Finished initializing swap device, now it's safe to reference it.
2351 */
2352 percpu_ref_resurrect(&p->users);
2353 spin_lock(&swap_lock);
2354 spin_lock(&p->lock);
2355 _enable_swap_info(p);
2356 spin_unlock(&p->lock);
2357 spin_unlock(&swap_lock);
2358 }
2359
2360 static void reinsert_swap_info(struct swap_info_struct *p)
2361 {
2362 spin_lock(&swap_lock);
2363 spin_lock(&p->lock);
2364 setup_swap_info(p, p->prio, p->swap_map, p->cluster_info);
2365 _enable_swap_info(p);
2366 spin_unlock(&p->lock);
2367 spin_unlock(&swap_lock);
2368 }
2369
2370 bool has_usable_swap(void)
2371 {
2372 bool ret = true;
2373
2374 spin_lock(&swap_lock);
2375 if (plist_head_empty(&swap_active_head))
2376 ret = false;
2377 spin_unlock(&swap_lock);
2378 return ret;
2379 }
2380
2381 SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
2382 {
2383 struct swap_info_struct *p = NULL;
2384 unsigned char *swap_map;
2385 struct swap_cluster_info *cluster_info;
2386 unsigned long *frontswap_map;
2387 struct file *swap_file, *victim;
2388 struct address_space *mapping;
2389 struct inode *inode;
2390 struct filename *pathname;
2391 int err, found = 0;
2392 unsigned int old_block_size;
2393
2394 if (!capable(CAP_SYS_ADMIN))
2395 return -EPERM;
2396
2397 BUG_ON(!current->mm);
2398
2399 pathname = getname(specialfile);
2400 if (IS_ERR(pathname))
2401 return PTR_ERR(pathname);
2402
2403 victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
2404 err = PTR_ERR(victim);
2405 if (IS_ERR(victim))
2406 goto out;
2407
2408 mapping = victim->f_mapping;
2409 spin_lock(&swap_lock);
2410 plist_for_each_entry(p, &swap_active_head, list) {
2411 if (p->flags & SWP_WRITEOK) {
2412 if (p->swap_file->f_mapping == mapping) {
2413 found = 1;
2414 break;
2415 }
2416 }
2417 }
2418 if (!found) {
2419 err = -EINVAL;
2420 spin_unlock(&swap_lock);
2421 goto out_dput;
2422 }
2423 if (!security_vm_enough_memory_mm(current->mm, p->pages))
2424 vm_unacct_memory(p->pages);
2425 else {
2426 err = -ENOMEM;
2427 spin_unlock(&swap_lock);
2428 goto out_dput;
2429 }
2430 del_from_avail_list(p);
2431 spin_lock(&p->lock);
2432 if (p->prio < 0) {
2433 struct swap_info_struct *si = p;
2434 int nid;
2435
2436 plist_for_each_entry_continue(si, &swap_active_head, list) {
2437 si->prio++;
2438 si->list.prio--;
2439 for_each_node(nid) {
2440 if (si->avail_lists[nid].prio != 1)
2441 si->avail_lists[nid].prio--;
2442 }
2443 }
2444 least_priority++;
2445 }
2446 plist_del(&p->list, &swap_active_head);
2447 atomic_long_sub(p->pages, &nr_swap_pages);
2448 total_swap_pages -= p->pages;
2449 p->flags &= ~SWP_WRITEOK;
2450 spin_unlock(&p->lock);
2451 spin_unlock(&swap_lock);
2452
2453 disable_swap_slots_cache_lock();
2454
2455 set_current_oom_origin();
2456 err = try_to_unuse(p->type);
2457 clear_current_oom_origin();
2458
2459 if (err) {
2460 /* re-insert swap space back into swap_list */
2461 reinsert_swap_info(p);
2462 reenable_swap_slots_cache_unlock();
2463 goto out_dput;
2464 }
2465
2466 reenable_swap_slots_cache_unlock();
2467
2468 /*
2469 * Wait for swap operations protected by get/put_swap_device()
2470 * to complete.
2471 *
2472 * We need synchronize_rcu() here to protect accesses to
2473 * the swap cache data structure.
2474 */
2475 percpu_ref_kill(&p->users);
2476 synchronize_rcu();
2477 wait_for_completion(&p->comp);
2478
2479 flush_work(&p->discard_work);
2480
2481 destroy_swap_extents(p);
2482 if (p->flags & SWP_CONTINUED)
2483 free_swap_count_continuations(p);
2484
2485 if (!p->bdev || !bdev_nonrot(p->bdev))
2486 atomic_dec(&nr_rotate_swap);
2487
2488 mutex_lock(&swapon_mutex);
2489 spin_lock(&swap_lock);
2490 spin_lock(&p->lock);
2491 drain_mmlist();
2492
2493 /* wait for anyone still in scan_swap_map_slots */
2494 p->highest_bit = 0; /* cuts scans short */
2495 while (p->flags >= SWP_SCANNING) {
2496 spin_unlock(&p->lock);
2497 spin_unlock(&swap_lock);
2498 schedule_timeout_uninterruptible(1);
2499 spin_lock(&swap_lock);
2500 spin_lock(&p->lock);
2501 }
2502
2503 swap_file = p->swap_file;
2504 old_block_size = p->old_block_size;
2505 p->swap_file = NULL;
2506 p->max = 0;
2507 swap_map = p->swap_map;
2508 p->swap_map = NULL;
2509 cluster_info = p->cluster_info;
2510 p->cluster_info = NULL;
2511 frontswap_map = frontswap_map_get(p);
2512 spin_unlock(&p->lock);
2513 spin_unlock(&swap_lock);
2514 arch_swap_invalidate_area(p->type);
2515 frontswap_invalidate_area(p->type);
2516 frontswap_map_set(p, NULL);
2517 mutex_unlock(&swapon_mutex);
2518 free_percpu(p->percpu_cluster);
2519 p->percpu_cluster = NULL;
2520 free_percpu(p->cluster_next_cpu);
2521 p->cluster_next_cpu = NULL;
2522 vfree(swap_map);
2523 kvfree(cluster_info);
2524 kvfree(frontswap_map);
2525 /* Destroy swap account information */
2526 swap_cgroup_swapoff(p->type);
2527 exit_swap_address_space(p->type);
2528
2529 inode = mapping->host;
2530 if (S_ISBLK(inode->i_mode)) {
2531 struct block_device *bdev = I_BDEV(inode);
2532
2533 set_blocksize(bdev, old_block_size);
2534 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2535 }
2536
2537 inode_lock(inode);
2538 inode->i_flags &= ~S_SWAPFILE;
2539 inode_unlock(inode);
2540 filp_close(swap_file, NULL);
2541
2542 /*
2543 * Clear the SWP_USED flag after all resources are freed so that swapon
2544 * can reuse this swap_info in alloc_swap_info() safely. It is ok to
2545 * not hold p->lock after we cleared its SWP_WRITEOK.
2546 */
2547 spin_lock(&swap_lock);
2548 p->flags = 0;
2549 spin_unlock(&swap_lock);
2550
2551 err = 0;
2552 atomic_inc(&proc_poll_event);
2553 wake_up_interruptible(&proc_poll_wait);
2554
2555 out_dput:
2556 filp_close(victim, NULL);
2557 out:
2558 putname(pathname);
2559 return err;
2560 }
2561
2562 #ifdef CONFIG_PROC_FS
2563 static __poll_t swaps_poll(struct file *file, poll_table *wait)
2564 {
2565 struct seq_file *seq = file->private_data;
2566
2567 poll_wait(file, &proc_poll_wait, wait);
2568
2569 if (seq->poll_event != atomic_read(&proc_poll_event)) {
2570 seq->poll_event = atomic_read(&proc_poll_event);
2571 return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
2572 }
2573
2574 return EPOLLIN | EPOLLRDNORM;
2575 }
2576
2577 /* iterator */
2578 static void *swap_start(struct seq_file *swap, loff_t *pos)
2579 {
2580 struct swap_info_struct *si;
2581 int type;
2582 loff_t l = *pos;
2583
2584 mutex_lock(&swapon_mutex);
2585
2586 if (!l)
2587 return SEQ_START_TOKEN;
2588
2589 for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
2590 if (!(si->flags & SWP_USED) || !si->swap_map)
2591 continue;
2592 if (!--l)
2593 return si;
2594 }
2595
2596 return NULL;
2597 }
2598
2599 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2600 {
2601 struct swap_info_struct *si = v;
2602 int type;
2603
2604 if (v == SEQ_START_TOKEN)
2605 type = 0;
2606 else
2607 type = si->type + 1;
2608
2609 ++(*pos);
2610 for (; (si = swap_type_to_swap_info(type)); type++) {
2611 if (!(si->flags & SWP_USED) || !si->swap_map)
2612 continue;
2613 return si;
2614 }
2615
2616 return NULL;
2617 }
2618
2619 static void swap_stop(struct seq_file *swap, void *v)
2620 {
2621 mutex_unlock(&swapon_mutex);
2622 }
2623
2624 static int swap_show(struct seq_file *swap, void *v)
2625 {
2626 struct swap_info_struct *si = v;
2627 struct file *file;
2628 int len;
2629 unsigned long bytes, inuse;
2630
2631 if (si == SEQ_START_TOKEN) {
2632 seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n");
2633 return 0;
2634 }
2635
2636 bytes = si->pages << (PAGE_SHIFT - 10);
2637 inuse = READ_ONCE(si->inuse_pages) << (PAGE_SHIFT - 10);
2638
2639 file = si->swap_file;
2640 len = seq_file_path(swap, file, " \t\n\\");
2641 seq_printf(swap, "%*s%s\t%lu\t%s%lu\t%s%d\n",
2642 len < 40 ? 40 - len : 1, " ",
2643 S_ISBLK(file_inode(file)->i_mode) ?
2644 "partition" : "file\t",
2645 bytes, bytes < 10000000 ? "\t" : "",
2646 inuse, inuse < 10000000 ? "\t" : "",
2647 si->prio);
2648 return 0;
2649 }
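
/*
 * Illustrative sketch only (guarded out, not kernel code): a user-space
 * rendering of one /proc/swaps line using the same format logic as
 * swap_show() above.  The device path, sizes and priority are made-up
 * assumptions, chosen only to show how the column padding and the extra
 * tabs for small numbers behave.
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* hypothetical values: an 8 GiB partition with 1 GiB in use, prio -2 */
	const char *path = "/dev/sda2";
	unsigned long bytes = 8388608, inuse = 1048576;
	int len = (int)strlen(path), prio = -2;

	printf("Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n");
	printf("%s", path);
	printf("%*s%s\t%lu\t%s%lu\t%s%d\n",
	       len < 40 ? 40 - len : 1, " ",
	       "partition",
	       bytes, bytes < 10000000 ? "\t" : "",
	       inuse, inuse < 10000000 ? "\t" : "",
	       prio);
	return 0;
}
#endif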
2650
2651 static const struct seq_operations swaps_op = {
2652 .start = swap_start,
2653 .next = swap_next,
2654 .stop = swap_stop,
2655 .show = swap_show
2656 };
2657
2658 static int swaps_open(struct inode *inode, struct file *file)
2659 {
2660 struct seq_file *seq;
2661 int ret;
2662
2663 ret = seq_open(file, &swaps_op);
2664 if (ret)
2665 return ret;
2666
2667 seq = file->private_data;
2668 seq->poll_event = atomic_read(&proc_poll_event);
2669 return 0;
2670 }
2671
2672 static const struct proc_ops swaps_proc_ops = {
2673 .proc_flags = PROC_ENTRY_PERMANENT,
2674 .proc_open = swaps_open,
2675 .proc_read = seq_read,
2676 .proc_lseek = seq_lseek,
2677 .proc_release = seq_release,
2678 .proc_poll = swaps_poll,
2679 };
2680
2681 static int __init procswaps_init(void)
2682 {
2683 proc_create("swaps", 0, NULL, &swaps_proc_ops);
2684 return 0;
2685 }
2686 __initcall(procswaps_init);
2687 #endif /* CONFIG_PROC_FS */
2688
2689 #ifdef MAX_SWAPFILES_CHECK
2690 static int __init max_swapfiles_check(void)
2691 {
2692 MAX_SWAPFILES_CHECK();
2693 return 0;
2694 }
2695 late_initcall(max_swapfiles_check);
2696 #endif
2697
2698 static struct swap_info_struct *alloc_swap_info(void)
2699 {
2700 struct swap_info_struct *p;
2701 struct swap_info_struct *defer = NULL;
2702 unsigned int type;
2703 int i;
2704
2705 p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
2706 if (!p)
2707 return ERR_PTR(-ENOMEM);
2708
2709 if (percpu_ref_init(&p->users, swap_users_ref_free,
2710 PERCPU_REF_INIT_DEAD, GFP_KERNEL)) {
2711 kvfree(p);
2712 return ERR_PTR(-ENOMEM);
2713 }
2714
2715 spin_lock(&swap_lock);
2716 for (type = 0; type < nr_swapfiles; type++) {
2717 if (!(swap_info[type]->flags & SWP_USED))
2718 break;
2719 }
2720 if (type >= MAX_SWAPFILES) {
2721 spin_unlock(&swap_lock);
2722 percpu_ref_exit(&p->users);
2723 kvfree(p);
2724 return ERR_PTR(-EPERM);
2725 }
2726 if (type >= nr_swapfiles) {
2727 p->type = type;
2728 /*
2729 * Publish the swap_info_struct after initializing it.
2730 * Note that kvzalloc() above zeroes all its fields.
2731 */
2732 smp_store_release(&swap_info[type], p); /* rcu_assign_pointer() */
2733 nr_swapfiles++;
2734 } else {
2735 defer = p;
2736 p = swap_info[type];
2737 /*
2738 * Do not memset this entry: a racing procfs swap_next()
2739 * would be relying on p->type to remain valid.
2740 */
2741 }
2742 p->swap_extent_root = RB_ROOT;
2743 plist_node_init(&p->list, 0);
2744 for_each_node(i)
2745 plist_node_init(&p->avail_lists[i], 0);
2746 p->flags = SWP_USED;
2747 spin_unlock(&swap_lock);
2748 if (defer) {
2749 percpu_ref_exit(&defer->users);
2750 kvfree(defer);
2751 }
2752 spin_lock_init(&p->lock);
2753 spin_lock_init(&p->cont_lock);
2754 init_completion(&p->comp);
2755
2756 return p;
2757 }
2758
2759 static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
2760 {
2761 int error;
2762
2763 if (S_ISBLK(inode->i_mode)) {
2764 p->bdev = blkdev_get_by_dev(inode->i_rdev,
2765 FMODE_READ | FMODE_WRITE | FMODE_EXCL, p);
2766 if (IS_ERR(p->bdev)) {
2767 error = PTR_ERR(p->bdev);
2768 p->bdev = NULL;
2769 return error;
2770 }
2771 p->old_block_size = block_size(p->bdev);
2772 error = set_blocksize(p->bdev, PAGE_SIZE);
2773 if (error < 0)
2774 return error;
2775 /*
2776 * Zoned block devices contain zones that have a sequential
2777 * write only restriction. Hence zoned block devices are not
2778 * suitable for swapping. Disallow them here.
2779 */
2780 if (bdev_is_zoned(p->bdev))
2781 return -EINVAL;
2782 p->flags |= SWP_BLKDEV;
2783 } else if (S_ISREG(inode->i_mode)) {
2784 p->bdev = inode->i_sb->s_bdev;
2785 }
2786
2787 return 0;
2788 }
2789
2790
2791 /*
2792 * Find out how many pages are allowed for a single swap device. There
2793 * are two limiting factors:
2794 * 1) the number of bits for the swap offset in the swp_entry_t type, and
2795 * 2) the number of bits in the swap pte, as defined by the different
2796 * architectures.
2797 *
2798 * In order to find the largest possible bit mask, a swap entry with
2799 * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
2800 * decoded to a swp_entry_t again, and finally the swap offset is
2801 * extracted.
2802 *
2803 * This will mask all the bits from the initial ~0UL mask that can't
2804 * be encoded in either the swp_entry_t or the architecture definition
2805 * of a swap pte.
2806 */
2807 unsigned long generic_max_swapfile_size(void)
2808 {
2809 return swp_offset(pte_to_swp_entry(
2810 swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
2811 }
2812
2813 /* Can be overridden by an architecture for additional checks. */
2814 __weak unsigned long arch_max_swapfile_size(void)
2815 {
2816 return generic_max_swapfile_size();
2817 }
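
/*
 * Illustrative sketch only (guarded out, not kernel code): the round-trip
 * masking idea described above for generic_max_swapfile_size().  If the swap
 * pte format can only hold N offset bits, encoding an all-ones offset and
 * decoding it again leaves exactly those N bits set; adding 1 then yields
 * the number of representable pages.  The 27-bit format below is a made-up
 * assumption, not any real architecture's layout.
 */
#if 0
#include <stdio.h>

#define DEMO_OFFSET_BITS 27	/* pretend the swap pte keeps 27 offset bits */

static unsigned long demo_encode(unsigned long offset)
{
	return offset & ((1UL << DEMO_OFFSET_BITS) - 1);
}

static unsigned long demo_decode(unsigned long pte)
{
	return pte & ((1UL << DEMO_OFFSET_BITS) - 1);
}

int main(void)
{
	unsigned long max = demo_decode(demo_encode(~0UL)) + 1;

	/* 2^27 pages, i.e. 512 GiB of swap with 4 KiB pages */
	printf("max offset+1 = %lu pages\n", max);
	return 0;
}
#endif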
2818
2819 static unsigned long read_swap_header(struct swap_info_struct *p,
2820 union swap_header *swap_header,
2821 struct inode *inode)
2822 {
2823 int i;
2824 unsigned long maxpages;
2825 unsigned long swapfilepages;
2826 unsigned long last_page;
2827
2828 if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
2829 pr_err("Unable to find swap-space signature\n");
2830 return 0;
2831 }
2832
2833 /* swap partition endianness hack... */
2834 if (swab32(swap_header->info.version) == 1) {
2835 swab32s(&swap_header->info.version);
2836 swab32s(&swap_header->info.last_page);
2837 swab32s(&swap_header->info.nr_badpages);
2838 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2839 return 0;
2840 for (i = 0; i < swap_header->info.nr_badpages; i++)
2841 swab32s(&swap_header->info.badpages[i]);
2842 }
2843 /* Check the swap header's sub-version */
2844 if (swap_header->info.version != 1) {
2845 pr_warn("Unable to handle swap header version %d\n",
2846 swap_header->info.version);
2847 return 0;
2848 }
2849
2850 p->lowest_bit = 1;
2851 p->cluster_next = 1;
2852 p->cluster_nr = 0;
2853
2854 maxpages = swapfile_maximum_size;
2855 last_page = swap_header->info.last_page;
2856 if (!last_page) {
2857 pr_warn("Empty swap-file\n");
2858 return 0;
2859 }
2860 if (last_page > maxpages) {
2861 pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
2862 maxpages << (PAGE_SHIFT - 10),
2863 last_page << (PAGE_SHIFT - 10));
2864 }
2865 if (maxpages > last_page) {
2866 maxpages = last_page + 1;
2867 /* p->max is an unsigned int: don't overflow it */
2868 if ((unsigned int)maxpages == 0)
2869 maxpages = UINT_MAX;
2870 }
2871 p->highest_bit = maxpages - 1;
2872
2873 if (!maxpages)
2874 return 0;
2875 swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
2876 if (swapfilepages && maxpages > swapfilepages) {
2877 pr_warn("Swap area shorter than signature indicates\n");
2878 return 0;
2879 }
2880 if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
2881 return 0;
2882 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
2883 return 0;
2884
2885 return maxpages;
2886 }
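
/*
 * Illustrative sketch only (guarded out, not kernel code): the endianness
 * hack used by read_swap_header() above.  Version 1 is the only header
 * version we handle, so if byte-swapping the stored version yields 1, the
 * header was written on a machine of the opposite endianness and every
 * multi-byte field needs swapping.  The sample value is a made-up assumption.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_swab32(uint32_t x)
{
	return ((x & 0x000000ffU) << 24) | ((x & 0x0000ff00U) << 8) |
	       ((x & 0x00ff0000U) >> 8)  | ((x & 0xff000000U) >> 24);
}

int main(void)
{
	uint32_t version = 0x01000000;	/* "1" as written by the other endianness */

	if (demo_swab32(version) == 1) {
		version = demo_swab32(version);
		printf("foreign-endian header, swapped version = %u\n",
		       (unsigned)version);
	}
	return 0;
}
#endif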
2887
2888 #define SWAP_CLUSTER_INFO_COLS \
2889 DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
2890 #define SWAP_CLUSTER_SPACE_COLS \
2891 DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
2892 #define SWAP_CLUSTER_COLS \
2893 max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
2894
2895 static int setup_swap_map_and_extents(struct swap_info_struct *p,
2896 union swap_header *swap_header,
2897 unsigned char *swap_map,
2898 struct swap_cluster_info *cluster_info,
2899 unsigned long maxpages,
2900 sector_t *span)
2901 {
2902 unsigned int j, k;
2903 unsigned int nr_good_pages;
2904 int nr_extents;
2905 unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
2906 unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS;
2907 unsigned long i, idx;
2908
2909 nr_good_pages = maxpages - 1; /* omit header page */
2910
2911 cluster_list_init(&p->free_clusters);
2912 cluster_list_init(&p->discard_clusters);
2913
2914 for (i = 0; i < swap_header->info.nr_badpages; i++) {
2915 unsigned int page_nr = swap_header->info.badpages[i];
2916 if (page_nr == 0 || page_nr > swap_header->info.last_page)
2917 return -EINVAL;
2918 if (page_nr < maxpages) {
2919 swap_map[page_nr] = SWAP_MAP_BAD;
2920 nr_good_pages--;
2921 /*
2922 * Haven't marked the cluster free yet, no list
2923 * operation involved
2924 */
2925 inc_cluster_info_page(p, cluster_info, page_nr);
2926 }
2927 }
2928
2929 /* Haven't marked the cluster free yet, no list operation involved */
2930 for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
2931 inc_cluster_info_page(p, cluster_info, i);
2932
2933 if (nr_good_pages) {
2934 swap_map[0] = SWAP_MAP_BAD;
2935 /*
2936 * Haven't marked the cluster free yet, no list
2937 * operation involved
2938 */
2939 inc_cluster_info_page(p, cluster_info, 0);
2940 p->max = maxpages;
2941 p->pages = nr_good_pages;
2942 nr_extents = setup_swap_extents(p, span);
2943 if (nr_extents < 0)
2944 return nr_extents;
2945 nr_good_pages = p->pages;
2946 }
2947 if (!nr_good_pages) {
2948 pr_warn("Empty swap-file\n");
2949 return -EINVAL;
2950 }
2951
2952 if (!cluster_info)
2953 return nr_extents;
2954
2955
2956 /*
2957 * Reduce false cache line sharing between cluster_info entries
2958 * sharing the same address space.
2959 */
2960 for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
2961 j = (k + col) % SWAP_CLUSTER_COLS;
2962 for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
2963 idx = i * SWAP_CLUSTER_COLS + j;
2964 if (idx >= nr_clusters)
2965 continue;
2966 if (cluster_count(&cluster_info[idx]))
2967 continue;
2968 cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE);
2969 cluster_list_add_tail(&p->free_clusters, cluster_info,
2970 idx);
2971 }
2972 }
2973 return nr_extents;
2974 }
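
/*
 * Illustrative sketch only (guarded out, not kernel code): the column
 * interleaving used above when the free cluster list is built.  Instead of
 * adding clusters 0, 1, 2, ... in order, the loop walks them a column at a
 * time (starting from the column of cluster_next) so that consecutive list
 * entries do not share cache lines or swap address spaces.  The column count
 * and cluster count below are made-up assumptions.
 */
#if 0
#include <stdio.h>

#define DEMO_COLS	4
#define DEMO_CLUSTERS	10

int main(void)
{
	unsigned long i, j, k, idx;
	unsigned long col = 1;	/* pretend cluster_next lands in column 1 */

	for (k = 0; k < DEMO_COLS; k++) {
		j = (k + col) % DEMO_COLS;
		for (i = 0; i < (DEMO_CLUSTERS + DEMO_COLS - 1) / DEMO_COLS; i++) {
			idx = i * DEMO_COLS + j;
			if (idx >= DEMO_CLUSTERS)
				continue;
			printf("%lu ", idx);	/* order clusters join the free list */
		}
	}
	printf("\n");	/* prints: 1 5 9 2 6 3 7 0 4 8 */
	return 0;
}
#endif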
2975
2976 SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2977 {
2978 struct swap_info_struct *p;
2979 struct filename *name;
2980 struct file *swap_file = NULL;
2981 struct address_space *mapping;
2982 struct dentry *dentry;
2983 int prio;
2984 int error;
2985 union swap_header *swap_header;
2986 int nr_extents;
2987 sector_t span;
2988 unsigned long maxpages;
2989 unsigned char *swap_map = NULL;
2990 struct swap_cluster_info *cluster_info = NULL;
2991 unsigned long *frontswap_map = NULL;
2992 struct page *page = NULL;
2993 struct inode *inode = NULL;
2994 bool inced_nr_rotate_swap = false;
2995
2996 if (swap_flags & ~SWAP_FLAGS_VALID)
2997 return -EINVAL;
2998
2999 if (!capable(CAP_SYS_ADMIN))
3000 return -EPERM;
3001
3002 if (!swap_avail_heads)
3003 return -ENOMEM;
3004
3005 p = alloc_swap_info();
3006 if (IS_ERR(p))
3007 return PTR_ERR(p);
3008
3009 INIT_WORK(&p->discard_work, swap_discard_work);
3010
3011 name = getname(specialfile);
3012 if (IS_ERR(name)) {
3013 error = PTR_ERR(name);
3014 name = NULL;
3015 goto bad_swap;
3016 }
3017 swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0);
3018 if (IS_ERR(swap_file)) {
3019 error = PTR_ERR(swap_file);
3020 swap_file = NULL;
3021 goto bad_swap;
3022 }
3023
3024 p->swap_file = swap_file;
3025 mapping = swap_file->f_mapping;
3026 dentry = swap_file->f_path.dentry;
3027 inode = mapping->host;
3028
3029 error = claim_swapfile(p, inode);
3030 if (unlikely(error))
3031 goto bad_swap;
3032
3033 inode_lock(inode);
3034 if (d_unlinked(dentry) || cant_mount(dentry)) {
3035 error = -ENOENT;
3036 goto bad_swap_unlock_inode;
3037 }
3038 if (IS_SWAPFILE(inode)) {
3039 error = -EBUSY;
3040 goto bad_swap_unlock_inode;
3041 }
3042
3043 /*
3044 * Read the swap header.
3045 */
3046 if (!mapping->a_ops->read_folio) {
3047 error = -EINVAL;
3048 goto bad_swap_unlock_inode;
3049 }
3050 page = read_mapping_page(mapping, 0, swap_file);
3051 if (IS_ERR(page)) {
3052 error = PTR_ERR(page);
3053 goto bad_swap_unlock_inode;
3054 }
3055 swap_header = kmap(page);
3056
3057 maxpages = read_swap_header(p, swap_header, inode);
3058 if (unlikely(!maxpages)) {
3059 error = -EINVAL;
3060 goto bad_swap_unlock_inode;
3061 }
3062
3063 /* OK, set up the swap map and apply the bad block list */
3064 swap_map = vzalloc(maxpages);
3065 if (!swap_map) {
3066 error = -ENOMEM;
3067 goto bad_swap_unlock_inode;
3068 }
3069
3070 if (p->bdev && bdev_stable_writes(p->bdev))
3071 p->flags |= SWP_STABLE_WRITES;
3072
3073 if (p->bdev && p->bdev->bd_disk->fops->rw_page)
3074 p->flags |= SWP_SYNCHRONOUS_IO;
3075
3076 if (p->bdev && bdev_nonrot(p->bdev)) {
3077 int cpu;
3078 unsigned long ci, nr_cluster;
3079
3080 p->flags |= SWP_SOLIDSTATE;
3081 p->cluster_next_cpu = alloc_percpu(unsigned int);
3082 if (!p->cluster_next_cpu) {
3083 error = -ENOMEM;
3084 goto bad_swap_unlock_inode;
3085 }
3086 /*
3087 * select a random position to start with to help wear leveling
3088 * of the SSD
3089 */
3090 for_each_possible_cpu(cpu) {
3091 per_cpu(*p->cluster_next_cpu, cpu) =
3092 1 + prandom_u32_max(p->highest_bit);
3093 }
3094 nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3095
3096 cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info),
3097 GFP_KERNEL);
3098 if (!cluster_info) {
3099 error = -ENOMEM;
3100 goto bad_swap_unlock_inode;
3101 }
3102
3103 for (ci = 0; ci < nr_cluster; ci++)
3104 spin_lock_init(&((cluster_info + ci)->lock));
3105
3106 p->percpu_cluster = alloc_percpu(struct percpu_cluster);
3107 if (!p->percpu_cluster) {
3108 error = -ENOMEM;
3109 goto bad_swap_unlock_inode;
3110 }
3111 for_each_possible_cpu(cpu) {
3112 struct percpu_cluster *cluster;
3113 cluster = per_cpu_ptr(p->percpu_cluster, cpu);
3114 cluster_set_null(&cluster->index);
3115 }
3116 } else {
3117 atomic_inc(&nr_rotate_swap);
3118 inced_nr_rotate_swap = true;
3119 }
3120
3121 error = swap_cgroup_swapon(p->type, maxpages);
3122 if (error)
3123 goto bad_swap_unlock_inode;
3124
3125 nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
3126 cluster_info, maxpages, &span);
3127 if (unlikely(nr_extents < 0)) {
3128 error = nr_extents;
3129 goto bad_swap_unlock_inode;
3130 }
3131 /* frontswap enabled? set up bit-per-page map for frontswap */
3132 if (IS_ENABLED(CONFIG_FRONTSWAP))
3133 frontswap_map = kvcalloc(BITS_TO_LONGS(maxpages),
3134 sizeof(long),
3135 GFP_KERNEL);
3136
3137 if ((swap_flags & SWAP_FLAG_DISCARD) &&
3138 p->bdev && bdev_max_discard_sectors(p->bdev)) {
3139 /*
3140 * When discard is enabled for swap with no particular
3141 * policy flagged, we set all swap discard flags here in
3142 * order to sustain backward compatibility with older
3143 * swapon(8) releases.
3144 */
3145 p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
3146 SWP_PAGE_DISCARD);
3147
3148 /*
3149 * By flagging sys_swapon, a sysadmin can tell us to
3150 * either do single-time area discards only, or to just
3151 * perform discards for released swap page-clusters.
3152 * Now it's time to adjust the p->flags accordingly.
3153 */
3154 if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
3155 p->flags &= ~SWP_PAGE_DISCARD;
3156 else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
3157 p->flags &= ~SWP_AREA_DISCARD;
3158
3159 /* issue a swapon-time discard if it's still required */
3160 if (p->flags & SWP_AREA_DISCARD) {
3161 int err = discard_swap(p);
3162 if (unlikely(err))
3163 pr_err("swapon: discard_swap(%p): %d\n",
3164 p, err);
3165 }
3166 }
3167
3168 error = init_swap_address_space(p->type, maxpages);
3169 if (error)
3170 goto bad_swap_unlock_inode;
3171
3172 /*
3173 * Flush any pending IO and dirty mappings before we start using this
3174 * swap device.
3175 */
3176 inode->i_flags |= S_SWAPFILE;
3177 error = inode_drain_writes(inode);
3178 if (error) {
3179 inode->i_flags &= ~S_SWAPFILE;
3180 goto free_swap_address_space;
3181 }
3182
3183 mutex_lock(&swapon_mutex);
3184 prio = -1;
3185 if (swap_flags & SWAP_FLAG_PREFER)
3186 prio =
3187 (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
3188 enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
3189
3190 pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
3191 p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
3192 nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
3193 (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
3194 (p->flags & SWP_DISCARDABLE) ? "D" : "",
3195 (p->flags & SWP_AREA_DISCARD) ? "s" : "",
3196 (p->flags & SWP_PAGE_DISCARD) ? "c" : "",
3197 (frontswap_map) ? "FS" : "");
3198
3199 mutex_unlock(&swapon_mutex);
3200 atomic_inc(&proc_poll_event);
3201 wake_up_interruptible(&proc_poll_wait);
3202
3203 error = 0;
3204 goto out;
3205 free_swap_address_space:
3206 exit_swap_address_space(p->type);
3207 bad_swap_unlock_inode:
3208 inode_unlock(inode);
3209 bad_swap:
3210 free_percpu(p->percpu_cluster);
3211 p->percpu_cluster = NULL;
3212 free_percpu(p->cluster_next_cpu);
3213 p->cluster_next_cpu = NULL;
3214 if (inode && S_ISBLK(inode->i_mode) && p->bdev) {
3215 set_blocksize(p->bdev, p->old_block_size);
3216 blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3217 }
3218 inode = NULL;
3219 destroy_swap_extents(p);
3220 swap_cgroup_swapoff(p->type);
3221 spin_lock(&swap_lock);
3222 p->swap_file = NULL;
3223 p->flags = 0;
3224 spin_unlock(&swap_lock);
3225 vfree(swap_map);
3226 kvfree(cluster_info);
3227 kvfree(frontswap_map);
3228 if (inced_nr_rotate_swap)
3229 atomic_dec(&nr_rotate_swap);
3230 if (swap_file)
3231 filp_close(swap_file, NULL);
3232 out:
3233 if (page && !IS_ERR(page)) {
3234 kunmap(page);
3235 put_page(page);
3236 }
3237 if (name)
3238 putname(name);
3239 if (inode)
3240 inode_unlock(inode);
3241 if (!error)
3242 enable_swap_slots_cache();
3243 return error;
3244 }
3245
3246 void si_swapinfo(struct sysinfo *val)
3247 {
3248 unsigned int type;
3249 unsigned long nr_to_be_unused = 0;
3250
3251 spin_lock(&swap_lock);
3252 for (type = 0; type < nr_swapfiles; type++) {
3253 struct swap_info_struct *si = swap_info[type];
3254
3255 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3256 nr_to_be_unused += READ_ONCE(si->inuse_pages);
3257 }
3258 val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
3259 val->totalswap = total_swap_pages + nr_to_be_unused;
3260 spin_unlock(&swap_lock);
3261 }
3262
3263 /*
3264 * Verify that a swap entry is valid and increment its swap map count.
3265 *
3266 * Returns error code in following case.
3267 * - success -> 0
3268 * - swp_entry is invalid -> EINVAL
3269 * - swp_entry is migration entry -> EINVAL
3270 * - swap-cache reference is requested but there is already one. -> EEXIST
3271 * - swap-cache reference is requested but the entry is not used. -> ENOENT
3272 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
3273 */
3274 static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
3275 {
3276 struct swap_info_struct *p;
3277 struct swap_cluster_info *ci;
3278 unsigned long offset;
3279 unsigned char count;
3280 unsigned char has_cache;
3281 int err;
3282
3283 p = get_swap_device(entry);
3284 if (!p)
3285 return -EINVAL;
3286
3287 offset = swp_offset(entry);
3288 ci = lock_cluster_or_swap_info(p, offset);
3289
3290 count = p->swap_map[offset];
3291
3292 /*
3293 * swapin_readahead() doesn't check if a swap entry is valid, so the
3294 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
3295 */
3296 if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
3297 err = -ENOENT;
3298 goto unlock_out;
3299 }
3300
3301 has_cache = count & SWAP_HAS_CACHE;
3302 count &= ~SWAP_HAS_CACHE;
3303 err = 0;
3304
3305 if (usage == SWAP_HAS_CACHE) {
3306
3307 /* set SWAP_HAS_CACHE if there is no cache and entry is used */
3308 if (!has_cache && count)
3309 has_cache = SWAP_HAS_CACHE;
3310 else if (has_cache) /* someone else added cache */
3311 err = -EEXIST;
3312 else /* no users remaining */
3313 err = -ENOENT;
3314
3315 } else if (count || has_cache) {
3316
3317 if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
3318 count += usage;
3319 else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
3320 err = -EINVAL;
3321 else if (swap_count_continued(p, offset, count))
3322 count = COUNT_CONTINUED;
3323 else
3324 err = -ENOMEM;
3325 } else
3326 err = -ENOENT; /* unused swap entry */
3327
3328 WRITE_ONCE(p->swap_map[offset], count | has_cache);
3329
3330 unlock_out:
3331 unlock_cluster_or_swap_info(p, ci);
3332 put_swap_device(p);
3333 return err;
3334 }
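
/*
 * Illustrative sketch only (guarded out, not kernel code): a user-space model
 * of the return-code rules documented above for __swap_duplicate().  A slot
 * is a map count plus a "has cache" flag; a caller either asks to attach a
 * swap-cache reference or to bump the map count.  Continuation counts are
 * left out of this model, and the flag value is a made-up stand-in.
 */
#if 0
#include <stdio.h>

#define DEMO_HAS_CACHE 0x40	/* stand-in for SWAP_HAS_CACHE */

static int demo_duplicate(unsigned char *slot, unsigned char usage)
{
	unsigned char has_cache = *slot & DEMO_HAS_CACHE;
	unsigned char count = *slot & ~DEMO_HAS_CACHE;

	if (usage == DEMO_HAS_CACHE) {
		if (!has_cache && count)
			has_cache = DEMO_HAS_CACHE;	/* attach the cache */
		else if (has_cache)
			return -17;	/* -EEXIST: already has a swap cache */
		else
			return -2;	/* -ENOENT: entry not in use at all */
	} else if (count || has_cache) {
		count += usage;		/* bump the map count */
	} else {
		return -2;		/* -ENOENT: unused swap entry */
	}
	*slot = count | has_cache;
	return 0;
}

int main(void)
{
	unsigned char slot = 1;		/* one map reference, no swap cache */

	printf("add cache: %d\n", demo_duplicate(&slot, DEMO_HAS_CACHE));	/* 0 */
	printf("add cache again: %d\n", demo_duplicate(&slot, DEMO_HAS_CACHE));/* -17 */
	printf("dup mapping: %d\n", demo_duplicate(&slot, 1));			/* 0 */
	return 0;
}
#endif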
3335
3336 /*
3337 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
3338 * (in which case its reference count is never incremented).
3339 */
3340 void swap_shmem_alloc(swp_entry_t entry)
3341 {
3342 __swap_duplicate(entry, SWAP_MAP_SHMEM);
3343 }
3344
3345 /*
3346 * Increase reference count of swap entry by 1.
3347 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
3348 * but could not be atomically allocated. Returns 0, just as if it succeeded,
3349 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
3350 * might occur if a page table entry has got corrupted.
3351 */
3352 int swap_duplicate(swp_entry_t entry)
3353 {
3354 int err = 0;
3355
3356 while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
3357 err = add_swap_count_continuation(entry, GFP_ATOMIC);
3358 return err;
3359 }
3360
3361 /*
3362 * @entry: swap entry for which we allocate swap cache.
3363 *
3364 * Called when allocating swap cache for an existing swap entry.
3365 * This can return error codes. Returns 0 on success.
3366 * -EEXIST means there is a swap cache.
3367 * Note: return code is different from swap_duplicate().
3368 */
3369 int swapcache_prepare(swp_entry_t entry)
3370 {
3371 return __swap_duplicate(entry, SWAP_HAS_CACHE);
3372 }
3373
3374 struct swap_info_struct *swp_swap_info(swp_entry_t entry)
3375 {
3376 return swap_type_to_swap_info(swp_type(entry));
3377 }
3378
3379 struct swap_info_struct *page_swap_info(struct page *page)
3380 {
3381 swp_entry_t entry = { .val = page_private(page) };
3382 return swp_swap_info(entry);
3383 }
3384
3385 /*
3386 * out-of-line methods to avoid include hell.
3387 */
3388 struct address_space *swapcache_mapping(struct folio *folio)
3389 {
3390 return page_swap_info(&folio->page)->swap_file->f_mapping;
3391 }
3392 EXPORT_SYMBOL_GPL(swapcache_mapping);
3393
3394 pgoff_t __page_file_index(struct page *page)
3395 {
3396 swp_entry_t swap = { .val = page_private(page) };
3397 return swp_offset(swap);
3398 }
3399 EXPORT_SYMBOL_GPL(__page_file_index);
3400
3401 /*
3402 * add_swap_count_continuation - called when a swap count is duplicated
3403 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
3404 * page of the original vmalloc'ed swap_map, to hold the continuation count
3405 * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called
3406 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
3407 *
3408 * These continuation pages are seldom referenced: the common paths all work
3409 * on the original swap_map, only referring to a continuation page when the
3410 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
3411 *
3412 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
3413 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
3414 * can be called after dropping locks.
3415 */
3416 int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
3417 {
3418 struct swap_info_struct *si;
3419 struct swap_cluster_info *ci;
3420 struct page *head;
3421 struct page *page;
3422 struct page *list_page;
3423 pgoff_t offset;
3424 unsigned char count;
3425 int ret = 0;
3426
3427 /*
3428 * When debugging, it's easier to use __GFP_ZERO here; but it's better
3429 * for latency not to zero a page while GFP_ATOMIC and holding locks.
3430 */
3431 page = alloc_page(gfp_mask | __GFP_HIGHMEM);
3432
3433 si = get_swap_device(entry);
3434 if (!si) {
3435 /*
3436 * An acceptable race has occurred since the failing
3437 * __swap_duplicate(): the swap device may have been swapped off
3438 */
3439 goto outer;
3440 }
3441 spin_lock(&si->lock);
3442
3443 offset = swp_offset(entry);
3444
3445 ci = lock_cluster(si, offset);
3446
3447 count = swap_count(si->swap_map[offset]);
3448
3449 if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
3450 /*
3451 * The higher the swap count, the more likely it is that tasks
3452 * will race to add swap count continuation: we need to avoid
3453 * over-provisioning.
3454 */
3455 goto out;
3456 }
3457
3458 if (!page) {
3459 ret = -ENOMEM;
3460 goto out;
3461 }
3462
3463 /*
3464 * We are fortunate that although vmalloc_to_page uses pte_offset_map,
3465 * no architecture is using highmem pages for kernel page tables: so it
3466 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
3467 */
3468 head = vmalloc_to_page(si->swap_map + offset);
3469 offset &= ~PAGE_MASK;
3470
3471 spin_lock(&si->cont_lock);
3472 /*
3473 * Page allocation does not initialize the page's lru field,
3474 * but it does always reset its private field.
3475 */
3476 if (!page_private(head)) {
3477 BUG_ON(count & COUNT_CONTINUED);
3478 INIT_LIST_HEAD(&head->lru);
3479 set_page_private(head, SWP_CONTINUED);
3480 si->flags |= SWP_CONTINUED;
3481 }
3482
3483 list_for_each_entry(list_page, &head->lru, lru) {
3484 unsigned char *map;
3485
3486 /*
3487 * If the previous map said no continuation, but we've found
3488 * a continuation page, free our allocation and use this one.
3489 */
3490 if (!(count & COUNT_CONTINUED))
3491 goto out_unlock_cont;
3492
3493 map = kmap_atomic(list_page) + offset;
3494 count = *map;
3495 kunmap_atomic(map);
3496
3497 /*
3498 * If this continuation count now has some space in it,
3499 * free our allocation and use this one.
3500 */
3501 if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
3502 goto out_unlock_cont;
3503 }
3504
3505 list_add_tail(&page->lru, &head->lru);
3506 page = NULL; /* now it's attached, don't free it */
3507 out_unlock_cont:
3508 spin_unlock(&si->cont_lock);
3509 out:
3510 unlock_cluster(ci);
3511 spin_unlock(&si->lock);
3512 put_swap_device(si);
3513 outer:
3514 if (page)
3515 __free_page(page);
3516 return ret;
3517 }
3518
3519 /*
3520 * swap_count_continued - when the original swap_map count is incremented
3521 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
3522 * into, carry if so, or else fail until a new continuation page is allocated;
3523 * when the original swap_map count is decremented from 0 with continuation,
3524 * borrow from the continuation and report whether it still holds more.
3525 * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster
3526 * lock.
3527 */
3528 static bool swap_count_continued(struct swap_info_struct *si,
3529 pgoff_t offset, unsigned char count)
3530 {
3531 struct page *head;
3532 struct page *page;
3533 unsigned char *map;
3534 bool ret;
3535
3536 head = vmalloc_to_page(si->swap_map + offset);
3537 if (page_private(head) != SWP_CONTINUED) {
3538 BUG_ON(count & COUNT_CONTINUED);
3539 return false; /* need to add count continuation */
3540 }
3541
3542 spin_lock(&si->cont_lock);
3543 offset &= ~PAGE_MASK;
3544 page = list_next_entry(head, lru);
3545 map = kmap_atomic(page) + offset;
3546
3547 if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
3548 goto init_map; /* jump over SWAP_CONT_MAX checks */
3549
3550 if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
3551 /*
3552 * Think of how you add 1 to 999
3553 */
3554 while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
3555 kunmap_atomic(map);
3556 page = list_next_entry(page, lru);
3557 BUG_ON(page == head);
3558 map = kmap_atomic(page) + offset;
3559 }
3560 if (*map == SWAP_CONT_MAX) {
3561 kunmap_atomic(map);
3562 page = list_next_entry(page, lru);
3563 if (page == head) {
3564 ret = false; /* add count continuation */
3565 goto out;
3566 }
3567 map = kmap_atomic(page) + offset;
3568 init_map: *map = 0; /* we didn't zero the page */
3569 }
3570 *map += 1;
3571 kunmap_atomic(map);
3572 while ((page = list_prev_entry(page, lru)) != head) {
3573 map = kmap_atomic(page) + offset;
3574 *map = COUNT_CONTINUED;
3575 kunmap_atomic(map);
3576 }
3577 ret = true; /* incremented */
3578
3579 } else { /* decrementing */
3580 /*
3581 * Think of how you subtract 1 from 1000
3582 */
3583 BUG_ON(count != COUNT_CONTINUED);
3584 while (*map == COUNT_CONTINUED) {
3585 kunmap_atomic(map);
3586 page = list_next_entry(page, lru);
3587 BUG_ON(page == head);
3588 map = kmap_atomic(page) + offset;
3589 }
3590 BUG_ON(*map == 0);
3591 *map -= 1;
3592 if (*map == 0)
3593 count = 0;
3594 kunmap_atomic(map);
3595 while ((page = list_prev_entry(page, lru)) != head) {
3596 map = kmap_atomic(page) + offset;
3597 *map = SWAP_CONT_MAX | count;
3598 count = COUNT_CONTINUED;
3599 kunmap_atomic(map);
3600 }
3601 ret = count == COUNT_CONTINUED;
3602 }
3603 out:
3604 spin_unlock(&si->cont_lock);
3605 return ret;
3606 }
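
/*
 * Illustrative sketch only (guarded out, not kernel code): the multi-digit
 * carry idea behind swap_count_continued() above ("think of how you add 1
 * to 999").  The swap_map byte is the low digit (capped at SWAP_MAP_MAX) and
 * each continuation page supplies one more digit (capped at SWAP_CONT_MAX);
 * incrementing past a full digit carries into the next one.  The digit
 * limits below are small made-up stand-ins for the real constants.
 */
#if 0
#include <stdio.h>

#define DEMO_MAP_MAX	9	/* stand-in for SWAP_MAP_MAX */
#define DEMO_CONT_MAX	9	/* stand-in for SWAP_CONT_MAX */
#define DEMO_DIGITS	4	/* base digit + up to 3 continuation digits */

/* digit[0] models the swap_map byte, digit[1..] the continuation pages */
static int demo_inc(unsigned int *digit)
{
	int i;

	for (i = 0; i < DEMO_DIGITS; i++) {
		unsigned int limit = i ? DEMO_CONT_MAX : DEMO_MAP_MAX;

		if (digit[i] < limit) {
			digit[i]++;
			return 0;
		}
		digit[i] = 0;	/* full: carry into the next digit */
	}
	return -1;		/* would need yet another continuation page */
}

int main(void)
{
	unsigned int digit[DEMO_DIGITS] = { 9, 9, 0, 0 };	/* count "99" */

	demo_inc(digit);	/* carries twice: becomes "100" */
	printf("%u%u%u\n", digit[2], digit[1], digit[0]);
	return 0;
}
#endif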
3607
3608 /*
3609 * free_swap_count_continuations - swapoff free all the continuation pages
3610 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it.
3611 */
3612 static void free_swap_count_continuations(struct swap_info_struct *si)
3613 {
3614 pgoff_t offset;
3615
3616 for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
3617 struct page *head;
3618 head = vmalloc_to_page(si->swap_map + offset);
3619 if (page_private(head)) {
3620 struct page *page, *next;
3621
3622 list_for_each_entry_safe(page, next, &head->lru, lru) {
3623 list_del(&page->lru);
3624 __free_page(page);
3625 }
3626 }
3627 }
3628 }
3629
3630 #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
3631 void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
3632 {
3633 struct swap_info_struct *si, *next;
3634 int nid = page_to_nid(page);
3635
3636 if (!(gfp_mask & __GFP_IO))
3637 return;
3638
3639 if (!blk_cgroup_congested())
3640 return;
3641
3642 /*
3643 * We've already scheduled a throttle, avoid taking the global swap
3644 * lock.
3645 */
3646 if (current->throttle_queue)
3647 return;
3648
3649 spin_lock(&swap_avail_lock);
3650 plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
3651 avail_lists[nid]) {
3652 if (si->bdev) {
3653 blkcg_schedule_throttle(si->bdev->bd_disk, true);
3654 break;
3655 }
3656 }
3657 spin_unlock(&swap_avail_lock);
3658 }
3659 #endif
3660
3661 static int __init swapfile_init(void)
3662 {
3663 int nid;
3664
3665 swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
3666 GFP_KERNEL);
3667 if (!swap_avail_heads) {
3668 pr_emerg("Not enough memory for swap heads, swap is disabled\n");
3669 return -ENOMEM;
3670 }
3671
3672 for_each_node(nid)
3673 plist_head_init(&swap_avail_heads[nid]);
3674
3675 swapfile_maximum_size = arch_max_swapfile_size();
3676
3677 #ifdef CONFIG_MIGRATION
3678 if (swapfile_maximum_size >= (1UL << SWP_MIG_TOTAL_BITS))
3679 swap_migration_ad_supported = true;
3680 #endif /* CONFIG_MIGRATION */
3681
3682 return 0;
3683 }
3684 subsys_initcall(swapfile_init);
3685