1 /*
2  *  linux/mm/swapfile.c
3  *
4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5  *  Swap reorganised 29.12.95, Stephen Tweedie
6  */
7 
8 #include <linux/slab.h>
9 #include <linux/smp_lock.h>
10 #include <linux/kernel_stat.h>
11 #include <linux/swap.h>
12 #include <linux/swapctl.h>
13 #include <linux/blkdev.h> /* for blk_size */
14 #include <linux/vmalloc.h>
15 #include <linux/pagemap.h>
16 #include <linux/shm.h>
17 
18 #include <asm/pgtable.h>
19 
20 spinlock_t swaplock = SPIN_LOCK_UNLOCKED;
21 unsigned int nr_swapfiles;
22 int total_swap_pages;
23 static int swap_overflow;
24 
25 static const char Bad_file[] = "Bad swap file entry ";
26 static const char Unused_file[] = "Unused swap file entry ";
27 static const char Bad_offset[] = "Bad swap offset entry ";
28 static const char Unused_offset[] = "Unused swap offset entry ";
29 
30 struct swap_list_t swap_list = {-1, -1};
31 
32 struct swap_info_struct swap_info[MAX_SWAPFILES];
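/*
 * Swap areas live in swap_info[], indexed by the "type" half of a
 * swp_entry_t, and are chained into a priority-ordered list through
 * swap_info[].next.  swap_list.head is the highest-priority area and
 * swap_list.next is where get_swap_page() starts searching; both start
 * out as -1 while no swap is enabled.  SWP_ENTRY(), SWP_TYPE() and
 * SWP_OFFSET() (from <asm/pgtable.h>) pack and unpack the (type, offset)
 * pair held in swp_entry_t.val.
 */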
33 
34 #define SWAPFILE_CLUSTER 256
35 
36 static inline int scan_swap_map(struct swap_info_struct *si)
37 {
38 	unsigned long offset;
39 	/*
40 	 * We try to cluster swap pages by allocating them
41 	 * sequentially in swap.  Once we've allocated
42 	 * SWAPFILE_CLUSTER pages this way, however, we resort to
43 	 * first-free allocation, starting a new cluster.  This
44 	 * prevents us from scattering swap pages all over the entire
45 	 * swap partition, so that we reduce overall disk seek times
46 	 * between swap pages.  -- sct */
47 	if (si->cluster_nr) {
48 		while (si->cluster_next <= si->highest_bit) {
49 			offset = si->cluster_next++;
50 			if (si->swap_map[offset])
51 				continue;
52 			si->cluster_nr--;
53 			goto got_page;
54 		}
55 	}
56 	si->cluster_nr = SWAPFILE_CLUSTER;
57 
58 	/* try to find an empty (even if not aligned) cluster. */
59 	offset = si->lowest_bit;
60  check_next_cluster:
61 	if (offset+SWAPFILE_CLUSTER-1 <= si->highest_bit)
62 	{
63 		int nr;
64 		for (nr = offset; nr < offset+SWAPFILE_CLUSTER; nr++)
65 			if (si->swap_map[nr])
66 			{
67 				offset = nr+1;
68 				goto check_next_cluster;
69 			}
70 		/* We found a completely empty cluster, so start
71 		 * using it.
72 		 */
73 		goto got_page;
74 	}
75 	/* No luck, so now go fine-grained as usual. -Andrea */
76 	for (offset = si->lowest_bit; offset <= si->highest_bit ; offset++) {
77 		if (si->swap_map[offset])
78 			continue;
79 		si->lowest_bit = offset+1;
80 	got_page:
81 		if (offset == si->lowest_bit)
82 			si->lowest_bit++;
83 		if (offset == si->highest_bit)
84 			si->highest_bit--;
85 		if (si->lowest_bit > si->highest_bit) {
86 			si->lowest_bit = si->max;
87 			si->highest_bit = 0;
88 		}
89 		si->swap_map[offset] = 1;
90 		nr_swap_pages--;
91 		si->cluster_next = offset+1;
92 		return offset;
93 	}
94 	si->lowest_bit = si->max;
95 	si->highest_bit = 0;
96 	return 0;
97 }
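/*
 * scan_swap_map() modifies si->swap_map and the cluster/lowest/highest
 * hints, so the caller must hold the per-device lock; get_swap_page()
 * below shows the expected pattern:
 *
 *	swap_device_lock(p);
 *	offset = scan_swap_map(p);
 *	swap_device_unlock(p);
 *	if (offset)
 *		entry = SWP_ENTRY(type, offset);
 *
 * A return value of 0 means no free slot was found (offset 0 is the
 * header page, marked SWAP_MAP_BAD at swapon time, so it is never
 * handed out).
 */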
98 
99 swp_entry_t get_swap_page(void)
100 {
101 	struct swap_info_struct * p;
102 	unsigned long offset;
103 	swp_entry_t entry;
104 	int type, wrapped = 0;
105 
106 	entry.val = 0;	/* Out of memory */
107 	swap_list_lock();
108 	type = swap_list.next;
109 	if (type < 0)
110 		goto out;
111 	if (nr_swap_pages <= 0)
112 		goto out;
113 
114 	while (1) {
115 		p = &swap_info[type];
116 		if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
117 			swap_device_lock(p);
118 			offset = scan_swap_map(p);
119 			swap_device_unlock(p);
120 			if (offset) {
121 				entry = SWP_ENTRY(type,offset);
122 				type = swap_info[type].next;
123 				if (type < 0 ||
124 					p->prio != swap_info[type].prio) {
125 						swap_list.next = swap_list.head;
126 				} else {
127 					swap_list.next = type;
128 				}
129 				goto out;
130 			}
131 		}
132 		type = p->next;
133 		if (!wrapped) {
134 			if (type < 0 || p->prio != swap_info[type].prio) {
135 				type = swap_list.head;
136 				wrapped = 1;
137 			}
138 		} else
139 			if (type < 0)
140 				goto out;	/* out of swap space */
141 	}
142 out:
143 	swap_list_unlock();
144 	return entry;
145 }
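/*
 * The bookkeeping above makes areas of equal priority take turns: the
 * next area at the same priority is tried on the next call, and only
 * when every area at one priority is full does allocation fall through
 * to the next lower priority.  Illustration (hypothetical devices, not
 * from this file): with two partitions at priority 5 and a swap file at
 * priority 0, successive get_swap_page() calls alternate between the two
 * partitions and touch the file only once both are full.
 */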
146 
147 static struct swap_info_struct * swap_info_get(swp_entry_t entry)
148 {
149 	struct swap_info_struct * p;
150 	unsigned long offset, type;
151 
152 	if (!entry.val)
153 		goto out;
154 	type = SWP_TYPE(entry);
155 	if (type >= nr_swapfiles)
156 		goto bad_nofile;
157 	p = & swap_info[type];
158 	if (!(p->flags & SWP_USED))
159 		goto bad_device;
160 	offset = SWP_OFFSET(entry);
161 	if (offset >= p->max)
162 		goto bad_offset;
163 	if (!p->swap_map[offset])
164 		goto bad_free;
165 	swap_list_lock();
166 	if (p->prio > swap_info[swap_list.next].prio)
167 		swap_list.next = type;
168 	swap_device_lock(p);
169 	return p;
170 
171 bad_free:
172 	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val);
173 	goto out;
174 bad_offset:
175 	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val);
176 	goto out;
177 bad_device:
178 	printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val);
179 	goto out;
180 bad_nofile:
181 	printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val);
182 out:
183 	return NULL;
184 }
185 
186 static void swap_info_put(struct swap_info_struct * p)
187 {
188 	swap_device_unlock(p);
189 	swap_list_unlock();
190 }
191 
192 static int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
193 {
194 	int count = p->swap_map[offset];
195 
196 	if (count < SWAP_MAP_MAX) {
197 		count--;
198 		p->swap_map[offset] = count;
199 		if (!count) {
200 			if (offset < p->lowest_bit)
201 				p->lowest_bit = offset;
202 			if (offset > p->highest_bit)
203 				p->highest_bit = offset;
204 			nr_swap_pages++;
205 		}
206 	}
207 	return count;
208 }
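/*
 * swap_map[offset] is a usage count: roughly one reference while the
 * page sits in the swap cache, plus one for each pte (or shmem object)
 * still pointing at the entry.  Slots pinned at SWAP_MAP_MAX are never
 * decremented here; they stay "permanent" until swapoff (see the
 * comment above swap_duplicate() and the SWAP_MAP_MAX case in
 * try_to_unuse()).  Callers hold both swap_list_lock() and the device
 * lock, taken via swap_info_get() and released via swap_info_put().
 */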
209 
210 /*
211  * Caller has made sure that the swapdevice corresponding to entry
212  * is still around or has not been recycled.
213  */
214 void swap_free(swp_entry_t entry)
215 {
216 	struct swap_info_struct * p;
217 
218 	p = swap_info_get(entry);
219 	if (p) {
220 		swap_entry_free(p, SWP_OFFSET(entry));
221 		swap_info_put(p);
222 	}
223 }
224 
225 /*
226  * Check if we're the only user of a swap page,
227  * when the page is locked.
228  */
229 static int exclusive_swap_page(struct page *page)
230 {
231 	int retval = 0;
232 	struct swap_info_struct * p;
233 	swp_entry_t entry;
234 
235 	entry.val = page->index;
236 	p = swap_info_get(entry);
237 	if (p) {
238 		/* Is the only swap cache user the cache itself? */
239 		if (p->swap_map[SWP_OFFSET(entry)] == 1) {
240 			/* Recheck the page count with the pagecache lock held.. */
241 			spin_lock(&pagecache_lock);
242 			if (page_count(page) - !!page->buffers == 2)
243 				retval = 1;
244 			spin_unlock(&pagecache_lock);
245 		}
246 		swap_info_put(p);
247 	}
248 	return retval;
249 }
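/*
 * The "page_count(page) - !!page->buffers == 2" test used here (and in
 * remove_exclusive_swap_page() and free_swap_and_cache() below) reads:
 * one reference from us, one from the swap cache, plus an extra one if
 * buffers are attached - i.e. nobody else holds the page.  The count is
 * rechecked under pagecache_lock because it can change up to that point.
 */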
250 
251 /*
252  * We can use this swap cache entry directly
253  * if there are no other references to it.
254  *
255  * Here "exclusive_swap_page()" does the real
256  * work, but we opportunistically check whether
257  * we need to get all the locks first..
258  */
259 int fastcall can_share_swap_page(struct page *page)
260 {
261 	int retval = 0;
262 
263 	if (!PageLocked(page))
264 		BUG();
265 	switch (page_count(page)) {
266 	case 3:
267 		if (!page->buffers)
268 			break;
269 		/* Fallthrough */
270 	case 2:
271 		if (!PageSwapCache(page))
272 			break;
273 		retval = exclusive_swap_page(page);
274 		break;
275 	case 1:
276 		if (PageReserved(page))
277 			break;
278 		retval = 1;
279 	}
280 	return retval;
281 }
282 
283 /*
284  * Work out if there are any other processes sharing this
285  * swap cache page. Free it if you can. Return success.
286  */
287 int fastcall remove_exclusive_swap_page(struct page *page)
288 {
289 	int retval;
290 	struct swap_info_struct * p;
291 	swp_entry_t entry;
292 
293 	if (!PageLocked(page))
294 		BUG();
295 	if (!PageSwapCache(page))
296 		return 0;
297 	if (page_count(page) - !!page->buffers != 2)	/* 2: us + cache */
298 		return 0;
299 
300 	entry.val = page->index;
301 	p = swap_info_get(entry);
302 	if (!p)
303 		return 0;
304 
305 	/* Is the only swap cache user the cache itself? */
306 	retval = 0;
307 	if (p->swap_map[SWP_OFFSET(entry)] == 1) {
308 		/* Recheck the page count with the pagecache lock held.. */
309 		spin_lock(&pagecache_lock);
310 		if (page_count(page) - !!page->buffers == 2) {
311 			__delete_from_swap_cache(page);
312 			SetPageDirty(page);
313 			retval = 1;
314 		}
315 		spin_unlock(&pagecache_lock);
316 	}
317 	swap_info_put(p);
318 
319 	if (retval) {
320 		block_flushpage(page, 0);
321 		swap_free(entry);
322 		page_cache_release(page);
323 	}
324 
325 	return retval;
326 }
327 
328 /*
329  * Free the swap entry like above, but also try to
330  * free the page cache entry if it is the last user.
331  */
332 void free_swap_and_cache(swp_entry_t entry)
333 {
334 	struct swap_info_struct * p;
335 	struct page *page = NULL;
336 
337 	p = swap_info_get(entry);
338 	if (p) {
339 		if (swap_entry_free(p, SWP_OFFSET(entry)) == 1)
340 			page = find_trylock_page(&swapper_space, entry.val);
341 		swap_info_put(p);
342 	}
343 	if (page) {
344 		page_cache_get(page);
345 		/* Only cache user (+us), or swap space full? Free it! */
346 		if (page_count(page) - !!page->buffers == 2 || vm_swap_full()) {
347 			delete_from_swap_cache(page);
348 			SetPageDirty(page);
349 		}
350 		UnlockPage(page);
351 		page_cache_release(page);
352 	}
353 }
354 
355 /*
356  * The swap entry has been read in advance, and the functions below
357  * replace any ptes still referencing it with mappings of that page.
358  *
359  * Always set the resulting pte to be nowrite (the same as COW pages
360  * after one process has exited).  We don't know just how many PTEs will
361  * share this swap entry, so be cautious and let do_wp_page work out
362  * what to do if a write is requested later.
363  */
364 /* mmlist_lock and vma->vm_mm->page_table_lock are held */
365 static inline void unuse_pte(struct vm_area_struct * vma, unsigned long address,
366 	pte_t *dir, swp_entry_t entry, struct page* page)
367 {
368 	pte_t pte = *dir;
369 
370 	if (likely(pte_to_swp_entry(pte).val != entry.val))
371 		return;
372 	if (unlikely(pte_none(pte) || pte_present(pte)))
373 		return;
374 	get_page(page);
375 	set_pte(dir, pte_mkold(mk_pte(page, vma->vm_page_prot)));
376 	swap_free(entry);
377 	++vma->vm_mm->rss;
378 }
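/*
 * In short: when the pte still holds this swap entry, unuse_pte()
 * replaces it with a present (and old) mapping of the page already read
 * into the swap cache, takes a page reference, bumps rss, and drops the
 * pte's reference on the swap entry via swap_free().  Per the comment
 * above, any later write is sorted out by do_wp_page().
 */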
379 
380 /* mmlist_lock and vma->vm_mm->page_table_lock are held */
381 static inline void unuse_pmd(struct vm_area_struct * vma, pmd_t *dir,
382 	unsigned long address, unsigned long size, unsigned long offset,
383 	swp_entry_t entry, struct page* page)
384 {
385 	pte_t * pte;
386 	unsigned long end;
387 
388 	if (pmd_none(*dir))
389 		return;
390 	if (pmd_bad(*dir)) {
391 		pmd_ERROR(*dir);
392 		pmd_clear(dir);
393 		return;
394 	}
395 	pte = pte_offset(dir, address);
396 	offset += address & PMD_MASK;
397 	address &= ~PMD_MASK;
398 	end = address + size;
399 	if (end > PMD_SIZE)
400 		end = PMD_SIZE;
401 	do {
402 		unuse_pte(vma, offset+address-vma->vm_start, pte, entry, page);
403 		address += PAGE_SIZE;
404 		pte++;
405 	} while (address && (address < end));
406 }
407 
408 /* mmlist_lock and vma->vm_mm->page_table_lock are held */
409 static inline void unuse_pgd(struct vm_area_struct * vma, pgd_t *dir,
410 	unsigned long address, unsigned long size,
411 	swp_entry_t entry, struct page* page)
412 {
413 	pmd_t * pmd;
414 	unsigned long offset, end;
415 
416 	if (pgd_none(*dir))
417 		return;
418 	if (pgd_bad(*dir)) {
419 		pgd_ERROR(*dir);
420 		pgd_clear(dir);
421 		return;
422 	}
423 	pmd = pmd_offset(dir, address);
424 	offset = address & PGDIR_MASK;
425 	address &= ~PGDIR_MASK;
426 	end = address + size;
427 	if (end > PGDIR_SIZE)
428 		end = PGDIR_SIZE;
429 	if (address >= end)
430 		BUG();
431 	do {
432 		unuse_pmd(vma, pmd, address, end - address, offset, entry,
433 			  page);
434 		address = (address + PMD_SIZE) & PMD_MASK;
435 		pmd++;
436 	} while (address && (address < end));
437 }
438 
439 /* mmlist_lock and vma->vm_mm->page_table_lock are held */
440 static void unuse_vma(struct vm_area_struct * vma, pgd_t *pgdir,
441 			swp_entry_t entry, struct page* page)
442 {
443 	unsigned long start = vma->vm_start, end = vma->vm_end;
444 
445 	if (start >= end)
446 		BUG();
447 	do {
448 		unuse_pgd(vma, pgdir, start, end - start, entry, page);
449 		start = (start + PGDIR_SIZE) & PGDIR_MASK;
450 		pgdir++;
451 	} while (start && (start < end));
452 }
453 
454 static void unuse_process(struct mm_struct * mm,
455 			swp_entry_t entry, struct page* page)
456 {
457 	struct vm_area_struct* vma;
458 
459 	/*
460 	 * Go through process' page directory.
461 	 */
462 	spin_lock(&mm->page_table_lock);
463 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
464 		pgd_t * pgd = pgd_offset(mm, vma->vm_start);
465 		unuse_vma(vma, pgd, entry, page);
466 	}
467 	spin_unlock(&mm->page_table_lock);
468 	return;
469 }
470 
471 /*
472  * Scan swap_map from current position to next entry still in use.
473  * Recycle to start on reaching the end, returning 0 when empty.
474  */
475 static int find_next_to_unuse(struct swap_info_struct *si, int prev)
476 {
477 	int max = si->max;
478 	int i = prev;
479 	int count;
480 
481 	/*
482 	 * No need for swap_device_lock(si) here: we're just looking
483 	 * for whether an entry is in use, not modifying it; false
484 	 * hits are okay, and sys_swapoff() has already prevented new
485 	 * allocations from this area (while holding swap_list_lock()).
486 	 */
487 	for (;;) {
488 		if (++i >= max) {
489 			if (!prev) {
490 				i = 0;
491 				break;
492 			}
493 			/*
494 			 * No entries in use at top of swap_map,
495 			 * loop back to start and recheck there.
496 			 */
497 			max = prev + 1;
498 			prev = 0;
499 			i = 1;
500 		}
501 		count = si->swap_map[i];
502 		if (count && count != SWAP_MAP_BAD)
503 			break;
504 	}
505 	return i;
506 }
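/*
 * Worked example (illustrative): with max == 10 and prev == 6 the scan
 * visits 7, 8, 9; on reaching max it restarts with i == 1 and max == 7,
 * so slots 1..6 are rechecked before giving up.  Free and SWAP_MAP_BAD
 * slots are skipped, and 0 is returned only when no in-use entry is
 * left (offset 0 itself is the header page and always SWAP_MAP_BAD).
 */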
507 
508 /*
509  * We completely avoid races by reading each swap page in advance,
510  * and then search for the process using it.  All the necessary
511  * page table adjustments can then be made atomically.
512  */
513 static int try_to_unuse(unsigned int type)
514 {
515 	struct swap_info_struct * si = &swap_info[type];
516 	struct mm_struct *start_mm;
517 	unsigned short *swap_map;
518 	unsigned short swcount;
519 	struct page *page;
520 	swp_entry_t entry;
521 	int i = 0;
522 	int retval = 0;
523 	int reset_overflow = 0;
524 	int shmem;
525 
526 	/*
527 	 * When searching mms for an entry, a good strategy is to
528 	 * start at the first mm we freed the previous entry from
529 	 * (though actually we don't notice whether we or coincidence
530 	 * freed the entry).  Initialize this start_mm with a hold.
531 	 *
532 	 * A simpler strategy would be to start at the last mm we
533 	 * freed the previous entry from; but that would take less
534 	 * advantage of mmlist ordering (now preserved by swap_out()),
535 	 * which clusters forked address spaces together, most recent
536 	 * child immediately after parent.  If we race with dup_mmap(),
537 	 * we very much want to resolve parent before child, otherwise
538 	 * we may miss some entries: using last mm would invert that.
539 	 */
540 	start_mm = &init_mm;
541 	atomic_inc(&init_mm.mm_users);
542 
543 	/*
544 	 * Keep on scanning until all entries have gone.  Usually,
545 	 * one pass through swap_map is enough, but not necessarily:
546 	 * mmput() removes mm from mmlist before exit_mmap() and its
547 	 * zap_page_range().  That's not too bad, those entries are
548 	 * on their way out, and handled faster there than here.
549 	 * do_munmap() behaves similarly, taking the range out of mm's
550 	 * vma list before zap_page_range().  But unfortunately, when
551 	 * unmapping a part of a vma, it takes the whole out first,
552 	 * then reinserts what's left after (might even reschedule if
553 	 * open() method called) - so swap entries may be invisible
554 	 * to swapoff for a while, then reappear - but that is rare.
555 	 */
556 	while ((i = find_next_to_unuse(si, i))) {
557 		/*
558 		 * Get a page for the entry, using the existing swap
559 		 * cache page if there is one.  Otherwise, get a clean
560 		 * page and read the swap into it.
561 		 */
562 		swap_map = &si->swap_map[i];
563 		entry = SWP_ENTRY(type, i);
564 		page = read_swap_cache_async(entry);
565 		if (!page) {
566 			/*
567 			 * Either swap_duplicate() failed because entry
568 			 * has been freed independently, and will not be
569 			 * reused since sys_swapoff() already disabled
570 			 * allocation from here, or alloc_page() failed.
571 			 */
572 			if (!*swap_map)
573 				continue;
574 			retval = -ENOMEM;
575 			break;
576 		}
577 
578 		/*
579 		 * Don't hold on to start_mm if it looks like exiting.
580 		 */
581 		if (atomic_read(&start_mm->mm_users) == 1) {
582 			mmput(start_mm);
583 			start_mm = &init_mm;
584 			atomic_inc(&init_mm.mm_users);
585 		}
586 
587 		/*
588 		 * Wait for and lock page.  When do_swap_page races with
589 		 * try_to_unuse, do_swap_page can handle the fault much
590 		 * faster than try_to_unuse can locate the entry.  This
591 		 * apparently redundant "wait_on_page" lets try_to_unuse
592 		 * defer to do_swap_page in such a case - in some tests,
593 		 * do_swap_page and try_to_unuse repeatedly compete.
594 		 */
595 		wait_on_page(page);
596 		lock_page(page);
597 
598 		/*
599 		 * Remove all references to entry, without blocking.
600 		 * Whenever we reach init_mm, there's no address space
601 		 * to search, but use it as a reminder to search shmem.
602 		 */
603 		shmem = 0;
604 		swcount = *swap_map;
605 		if (swcount > 1) {
606 			flush_page_to_ram(page);
607 			if (start_mm == &init_mm)
608 				shmem = shmem_unuse(entry, page);
609 			else
610 				unuse_process(start_mm, entry, page);
611 		}
612 		if (*swap_map > 1) {
613 			int set_start_mm = (*swap_map >= swcount);
614 			struct list_head *p = &start_mm->mmlist;
615 			struct mm_struct *new_start_mm = start_mm;
616 			struct mm_struct *mm;
617 
618 			spin_lock(&mmlist_lock);
619 			while (*swap_map > 1 &&
620 					(p = p->next) != &start_mm->mmlist) {
621 				mm = list_entry(p, struct mm_struct, mmlist);
622 				swcount = *swap_map;
623 				if (mm == &init_mm) {
624 					set_start_mm = 1;
625 					spin_unlock(&mmlist_lock);
626 					shmem = shmem_unuse(entry, page);
627 					spin_lock(&mmlist_lock);
628 				} else
629 					unuse_process(mm, entry, page);
630 				if (set_start_mm && *swap_map < swcount) {
631 					new_start_mm = mm;
632 					set_start_mm = 0;
633 				}
634 			}
635 			atomic_inc(&new_start_mm->mm_users);
636 			spin_unlock(&mmlist_lock);
637 			mmput(start_mm);
638 			start_mm = new_start_mm;
639 		}
640 
641 		/*
642 		 * How could swap count reach 0x7fff when the maximum
643 		 * pid is 0x7fff, and there's no way to repeat a swap
644 		 * page within an mm (except in shmem, where it's the
645 		 * shared object which takes the reference count)?
646 		 * We believe SWAP_MAP_MAX cannot occur in Linux 2.4.
647 		 *
648 		 * If that's wrong, then we should worry more about
649 		 * exit_mmap() and do_munmap() cases described above:
650 		 * we might be resetting SWAP_MAP_MAX too early here.
651 		 * We know "Undead"s can happen, they're okay, so don't
652 		 * report them; but do report if we reset SWAP_MAP_MAX.
653 		 */
654 		if (*swap_map == SWAP_MAP_MAX) {
655 			swap_list_lock();
656 			swap_device_lock(si);
657 			nr_swap_pages++;
658 			*swap_map = 1;
659 			swap_device_unlock(si);
660 			swap_list_unlock();
661 			reset_overflow = 1;
662 		}
663 
664 		/*
665 		 * If a reference remains (rare), we would like to leave
666 		 * the page in the swap cache; but try_to_swap_out could
667 		 * then re-duplicate the entry once we drop page lock,
668 		 * so we might loop indefinitely; also, that page could
669 		 * not be swapped out to other storage meanwhile.  So:
670 		 * delete from cache even if there's another reference,
671 		 * after ensuring that the data has been saved to disk -
672 		 * since if the reference remains (rarer), it will be
673 		 * read from disk into another page.  Splitting into two
674 		 * pages would be incorrect if swap supported "shared
675 		 * private" pages, but they are handled by tmpfs files.
676 		 *
677 		 * Note shmem_unuse already deleted swappage from cache,
678 		 * unless corresponding filepage found already in cache:
679 		 * in which case it left swappage in cache, lowered its
680 		 * swap count to pass quickly through the loops above,
681 		 * and now we must reincrement count to try again later.
682 		 */
683 		if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) {
684 			rw_swap_page(WRITE, page);
685 			lock_page(page);
686 		}
687 		if (PageSwapCache(page)) {
688 			if (shmem)
689 				swap_duplicate(entry);
690 			else
691 				delete_from_swap_cache(page);
692 		}
693 
694 		/*
695 		 * So we could skip searching mms once swap count went
696 		 * to 1, we did not mark any present ptes as dirty: must
697 		 * mark page dirty so try_to_swap_out will preserve it.
698 		 */
699 		SetPageDirty(page);
700 		UnlockPage(page);
701 		page_cache_release(page);
702 
703 		/*
704 		 * Make sure that we aren't completely killing
705 		 * interactive performance.  Interruptible check on
706 		 * signal_pending() would be nice, but changes the spec?
707 		 */
708 		if (current->need_resched)
709 			schedule();
710 	}
711 
712 	mmput(start_mm);
713 	if (reset_overflow) {
714 		printk(KERN_WARNING "swapoff: cleared swap entry overflow\n");
715 		swap_overflow = 0;
716 	}
717 	return retval;
718 }
719 
720 asmlinkage long sys_swapoff(const char * specialfile)
721 {
722 	struct swap_info_struct * p = NULL;
723 	unsigned short *swap_map;
724 	struct nameidata nd;
725 	int i, type, prev;
726 	int err;
727 
728 	if (!capable(CAP_SYS_ADMIN))
729 		return -EPERM;
730 
731 	err = user_path_walk(specialfile, &nd);
732 	if (err)
733 		goto out;
734 
735 	lock_kernel();
736 	prev = -1;
737 	swap_list_lock();
738 	for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
739 		p = swap_info + type;
740 		if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
741 			if (p->swap_file == nd.dentry ||
742 			    (S_ISBLK(nd.dentry->d_inode->i_mode) &&
743 			    p->swap_device == nd.dentry->d_inode->i_rdev))
744 				break;
745 		}
746 		prev = type;
747 	}
748 	err = -EINVAL;
749 	if (type < 0) {
750 		swap_list_unlock();
751 		goto out_dput;
752 	}
753 
754 	if (prev < 0) {
755 		swap_list.head = p->next;
756 	} else {
757 		swap_info[prev].next = p->next;
758 	}
759 	if (type == swap_list.next) {
760 		/* just pick something that's safe... */
761 		swap_list.next = swap_list.head;
762 	}
763 	nr_swap_pages -= p->pages;
764 	total_swap_pages -= p->pages;
765 	p->flags = SWP_USED;
766 	swap_list_unlock();
767 	unlock_kernel();
768 	err = try_to_unuse(type);
769 	lock_kernel();
770 	if (err) {
771 		/* re-insert swap space back into swap_list */
772 		swap_list_lock();
773 		for (prev = -1, i = swap_list.head; i >= 0; prev = i, i = swap_info[i].next)
774 			if (p->prio >= swap_info[i].prio)
775 				break;
776 		p->next = i;
777 		if (prev < 0)
778 			swap_list.head = swap_list.next = p - swap_info;
779 		else
780 			swap_info[prev].next = p - swap_info;
781 		nr_swap_pages += p->pages;
782 		total_swap_pages += p->pages;
783 		p->flags = SWP_WRITEOK;
784 		swap_list_unlock();
785 		goto out_dput;
786 	}
787 	if (p->swap_device)
788 		blkdev_put(p->swap_file->d_inode->i_bdev, BDEV_SWAP);
789 	path_release(&nd);
790 
791 	swap_list_lock();
792 	swap_device_lock(p);
793 	nd.mnt = p->swap_vfsmnt;
794 	nd.dentry = p->swap_file;
795 	p->swap_vfsmnt = NULL;
796 	p->swap_file = NULL;
797 	p->swap_device = 0;
798 	p->max = 0;
799 	swap_map = p->swap_map;
800 	p->swap_map = NULL;
801 	p->flags = 0;
802 	swap_device_unlock(p);
803 	swap_list_unlock();
804 	vfree(swap_map);
805 	err = 0;
806 
807 out_dput:
808 	unlock_kernel();
809 	path_release(&nd);
810 out:
811 	return err;
812 }
813 
814 int get_swaparea_info(char *buf)
815 {
816 	char * page = (char *) __get_free_page(GFP_KERNEL);
817 	struct swap_info_struct *ptr = swap_info;
818 	int i, j, len = 0, usedswap;
819 
820 	if (!page)
821 		return -ENOMEM;
822 
823 	len += sprintf(buf, "Filename\t\t\tType\t\tSize\tUsed\tPriority\n");
824 	for (i = 0 ; i < nr_swapfiles ; i++, ptr++) {
825 		if ((ptr->flags & SWP_USED) && ptr->swap_map) {
826 			char * path = d_path(ptr->swap_file, ptr->swap_vfsmnt,
827 						page, PAGE_SIZE);
828 
829 			len += sprintf(buf + len, "%-31s ", path);
830 
831 			if (!ptr->swap_device)
832 				len += sprintf(buf + len, "file\t\t");
833 			else
834 				len += sprintf(buf + len, "partition\t");
835 
836 			usedswap = 0;
837 			for (j = 0; j < ptr->max; ++j)
838 				switch (ptr->swap_map[j]) {
839 					case SWAP_MAP_BAD:
840 					case 0:
841 						continue;
842 					default:
843 						usedswap++;
844 				}
845 			len += sprintf(buf + len, "%d\t%d\t%d\n", ptr->pages << (PAGE_SHIFT - 10),
846 				usedswap << (PAGE_SHIFT - 10), ptr->prio);
847 		}
848 	}
849 	free_page((unsigned long) page);
850 	return len;
851 }
852 
853 int is_swap_partition(kdev_t dev) {
854 	struct swap_info_struct *ptr = swap_info;
855 	int i;
856 
857 	for (i = 0 ; i < nr_swapfiles ; i++, ptr++) {
858 		if (ptr->flags & SWP_USED)
859 			if (ptr->swap_device == dev)
860 				return 1;
861 	}
862 	return 0;
863 }
864 
865 /*
866  * Written 01/25/92 by Simmule Turner, heavily changed by Linus.
867  *
868  * The swapon system call
869  */
870 asmlinkage long sys_swapon(const char * specialfile, int swap_flags)
871 {
872 	struct swap_info_struct * p;
873 	struct nameidata nd;
874 	struct inode * swap_inode;
875 	unsigned int type;
876 	int i, j, prev;
877 	int error;
878 	static int least_priority = 0;
879 	union swap_header *swap_header = 0;
880 	int swap_header_version;
881 	int nr_good_pages = 0;
882 	unsigned long maxpages = 1;
883 	int swapfilesize;
884 	struct block_device *bdev = NULL;
885 	unsigned short *swap_map;
886 
887 	if (!capable(CAP_SYS_ADMIN))
888 		return -EPERM;
889 	lock_kernel();
890 	swap_list_lock();
891 	p = swap_info;
892 	for (type = 0 ; type < nr_swapfiles ; type++,p++)
893 		if (!(p->flags & SWP_USED))
894 			break;
895 	error = -EPERM;
896 	if (type >= MAX_SWAPFILES) {
897 		swap_list_unlock();
898 		goto out;
899 	}
900 	if (type >= nr_swapfiles)
901 		nr_swapfiles = type+1;
902 	p->flags = SWP_USED;
903 	p->swap_file = NULL;
904 	p->swap_vfsmnt = NULL;
905 	p->swap_device = 0;
906 	p->swap_map = NULL;
907 	p->lowest_bit = 0;
908 	p->highest_bit = 0;
909 	p->cluster_nr = 0;
910 	p->sdev_lock = SPIN_LOCK_UNLOCKED;
911 	p->next = -1;
912 	if (swap_flags & SWAP_FLAG_PREFER) {
913 		p->prio =
914 		  (swap_flags & SWAP_FLAG_PRIO_MASK)>>SWAP_FLAG_PRIO_SHIFT;
915 	} else {
916 		p->prio = --least_priority;
917 	}
918 	swap_list_unlock();
919 	error = user_path_walk(specialfile, &nd);
920 	if (error)
921 		goto bad_swap_2;
922 
923 	p->swap_file = nd.dentry;
924 	p->swap_vfsmnt = nd.mnt;
925 	swap_inode = nd.dentry->d_inode;
926 	error = -EINVAL;
927 
928 	if (S_ISBLK(swap_inode->i_mode)) {
929 		kdev_t dev = swap_inode->i_rdev;
930 		struct block_device_operations *bdops;
931 		devfs_handle_t de;
932 
933 		if (is_mounted(dev)) {
934 			error = -EBUSY;
935 			goto bad_swap_2;
936 		}
937 
938 		p->swap_device = dev;
939 		set_blocksize(dev, PAGE_SIZE);
940 
941 		bd_acquire(swap_inode);
942 		bdev = swap_inode->i_bdev;
943 		de = devfs_get_handle_from_inode(swap_inode);
944 		bdops = devfs_get_ops(de);  /*  Increments module use count  */
945 		if (bdops) bdev->bd_op = bdops;
946 
947 		error = blkdev_get(bdev, FMODE_READ|FMODE_WRITE, 0, BDEV_SWAP);
948 		devfs_put_ops(de);/*Decrement module use count now we're safe*/
949 		if (error)
950 			goto bad_swap_2;
951 		set_blocksize(dev, PAGE_SIZE);
952 		error = -ENODEV;
953 		if (!dev || (blk_size[MAJOR(dev)] &&
954 		     !blk_size[MAJOR(dev)][MINOR(dev)]))
955 			goto bad_swap;
956 		swapfilesize = 0;
957 		if (blk_size[MAJOR(dev)])
958 			swapfilesize = blk_size[MAJOR(dev)][MINOR(dev)]
959 				>> (PAGE_SHIFT - 10);
960 	} else if (S_ISREG(swap_inode->i_mode))
961 		swapfilesize = swap_inode->i_size >> PAGE_SHIFT;
962 	else
963 		goto bad_swap;
964 
965 	error = -EBUSY;
966 	for (i = 0 ; i < nr_swapfiles ; i++) {
967 		struct swap_info_struct *q = &swap_info[i];
968 		if (i == type || !q->swap_file)
969 			continue;
970 		if (swap_inode->i_mapping == q->swap_file->d_inode->i_mapping)
971 			goto bad_swap;
972 	}
973 
974 	swap_header = (void *) __get_free_page(GFP_USER);
975 	if (!swap_header) {
976 		printk("Unable to start swapping: out of memory :-)\n");
977 		error = -ENOMEM;
978 		goto bad_swap;
979 	}
980 
981 	lock_page(virt_to_page(swap_header));
982 	rw_swap_page_nolock(READ, SWP_ENTRY(type,0), (char *) swap_header);
983 
984 	if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10))
985 		swap_header_version = 1;
986 	else if (!memcmp("SWAPSPACE2",swap_header->magic.magic,10))
987 		swap_header_version = 2;
988 	else {
989 		printk("Unable to find swap-space signature\n");
990 		error = -EINVAL;
991 		goto bad_swap;
992 	}
993 
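	/*
	 * Both on-disk formats keep their 10-byte signature at the end of
	 * the header page.  Version 1 ("SWAP-SPACE") uses the rest of the
	 * page as a bitmap of usable pages, which caps an area at
	 * 8*PAGE_SIZE pages.  Version 2 ("SWAPSPACE2") instead describes
	 * itself through swap_header->info (version, last_page,
	 * nr_badpages, badpages[]), parsed below, so its size limit comes
	 * from what SWP_OFFSET() can encode.
	 */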
994 	switch (swap_header_version) {
995 	case 1:
996 		memset(((char *) swap_header)+PAGE_SIZE-10,0,10);
997 		j = 0;
998 		p->lowest_bit = 0;
999 		p->highest_bit = 0;
1000 		for (i = 1 ; i < 8*PAGE_SIZE ; i++) {
1001 			if (test_bit(i,(char *) swap_header)) {
1002 				if (!p->lowest_bit)
1003 					p->lowest_bit = i;
1004 				p->highest_bit = i;
1005 				maxpages = i+1;
1006 				j++;
1007 			}
1008 		}
1009 		nr_good_pages = j;
1010 		p->swap_map = vmalloc(maxpages * sizeof(short));
1011 		if (!p->swap_map) {
1012 			error = -ENOMEM;
1013 			goto bad_swap;
1014 		}
1015 		for (i = 1 ; i < maxpages ; i++) {
1016 			if (test_bit(i,(char *) swap_header))
1017 				p->swap_map[i] = 0;
1018 			else
1019 				p->swap_map[i] = SWAP_MAP_BAD;
1020 		}
1021 		break;
1022 
1023 	case 2:
1024 		/* Check the swap header's sub-version and the size of
1025                    the swap file and bad block lists */
1026 		if (swap_header->info.version != 1) {
1027 			printk(KERN_WARNING
1028 			       "Unable to handle swap header version %d\n",
1029 			       swap_header->info.version);
1030 			error = -EINVAL;
1031 			goto bad_swap;
1032 		}
1033 
1034 		p->lowest_bit  = 1;
1035 		maxpages = SWP_OFFSET(SWP_ENTRY(0,~0UL)) - 1;
1036 		if (maxpages > swap_header->info.last_page)
1037 			maxpages = swap_header->info.last_page;
1038 		p->highest_bit = maxpages - 1;
1039 
1040 		error = -EINVAL;
1041 		if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
1042 			goto bad_swap;
1043 
1044 		/* OK, set up the swap map and apply the bad block list */
1045 		if (!(p->swap_map = vmalloc(maxpages * sizeof(short)))) {
1046 			error = -ENOMEM;
1047 			goto bad_swap;
1048 		}
1049 
1050 		error = 0;
1051 		memset(p->swap_map, 0, maxpages * sizeof(short));
1052 		for (i=0; i<swap_header->info.nr_badpages; i++) {
1053 			int page = swap_header->info.badpages[i];
1054 			if (page <= 0 || page >= swap_header->info.last_page)
1055 				error = -EINVAL;
1056 			else
1057 				p->swap_map[page] = SWAP_MAP_BAD;
1058 		}
1059 		nr_good_pages = swap_header->info.last_page -
1060 				swap_header->info.nr_badpages -
1061 				1 /* header page */;
1062 		if (error)
1063 			goto bad_swap;
1064 	}
1065 
1066 	if (swapfilesize && maxpages > swapfilesize) {
1067 		printk(KERN_WARNING
1068 		       "Swap area shorter than signature indicates\n");
1069 		error = -EINVAL;
1070 		goto bad_swap;
1071 	}
1072 	if (!nr_good_pages) {
1073 		printk(KERN_WARNING "Empty swap-file\n");
1074 		error = -EINVAL;
1075 		goto bad_swap;
1076 	}
1077 	p->swap_map[0] = SWAP_MAP_BAD;
1078 	swap_list_lock();
1079 	swap_device_lock(p);
1080 	p->max = maxpages;
1081 	p->flags = SWP_WRITEOK;
1082 	p->pages = nr_good_pages;
1083 	nr_swap_pages += nr_good_pages;
1084 	total_swap_pages += nr_good_pages;
1085 	printk(KERN_INFO "Adding Swap: %dk swap-space (priority %d)\n",
1086 	       nr_good_pages<<(PAGE_SHIFT-10), p->prio);
1087 
1088 	/* insert swap space into swap_list: */
1089 	prev = -1;
1090 	for (i = swap_list.head; i >= 0; i = swap_info[i].next) {
1091 		if (p->prio >= swap_info[i].prio) {
1092 			break;
1093 		}
1094 		prev = i;
1095 	}
1096 	p->next = i;
1097 	if (prev < 0) {
1098 		swap_list.head = swap_list.next = p - swap_info;
1099 	} else {
1100 		swap_info[prev].next = p - swap_info;
1101 	}
1102 	swap_device_unlock(p);
1103 	swap_list_unlock();
1104 	error = 0;
1105 	goto out;
1106 bad_swap:
1107 	if (bdev)
1108 		blkdev_put(bdev, BDEV_SWAP);
1109 bad_swap_2:
1110 	swap_list_lock();
1111 	swap_map = p->swap_map;
1112 	nd.mnt = p->swap_vfsmnt;
1113 	nd.dentry = p->swap_file;
1114 	p->swap_device = 0;
1115 	p->swap_file = NULL;
1116 	p->swap_vfsmnt = NULL;
1117 	p->swap_map = NULL;
1118 	p->flags = 0;
1119 	if (!(swap_flags & SWAP_FLAG_PREFER))
1120 		++least_priority;
1121 	swap_list_unlock();
1122 	if (swap_map)
1123 		vfree(swap_map);
1124 	path_release(&nd);
1125 out:
1126 	if (swap_header)
1127 		free_page((long) swap_header);
1128 	unlock_kernel();
1129 	return error;
1130 }
1131 
1132 void si_swapinfo(struct sysinfo *val)
1133 {
1134 	unsigned int i;
1135 	unsigned long nr_to_be_unused = 0;
1136 
1137 	swap_list_lock();
1138 	for (i = 0; i < nr_swapfiles; i++) {
1139 		unsigned int j;
1140 		if (swap_info[i].flags != SWP_USED)
1141 			continue;
1142 		for (j = 0; j < swap_info[i].max; ++j) {
1143 			switch (swap_info[i].swap_map[j]) {
1144 				case 0:
1145 				case SWAP_MAP_BAD:
1146 					continue;
1147 				default:
1148 					nr_to_be_unused++;
1149 			}
1150 		}
1151 	}
1152 	val->freeswap = nr_swap_pages + nr_to_be_unused;
1153 	val->totalswap = total_swap_pages + nr_to_be_unused;
1154 	swap_list_unlock();
1155 }
1156 
1157 /*
1158  * Verify that a swap entry is valid and increment its swap map count.
1159  *
1160  * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as
1161  * "permanent", but will be reclaimed by the next swapoff.
1162  */
1163 int swap_duplicate(swp_entry_t entry)
1164 {
1165 	struct swap_info_struct * p;
1166 	unsigned long offset, type;
1167 	int result = 0;
1168 
1169 	type = SWP_TYPE(entry);
1170 	if (type >= nr_swapfiles)
1171 		goto bad_file;
1172 	p = type + swap_info;
1173 	offset = SWP_OFFSET(entry);
1174 
1175 	swap_device_lock(p);
1176 	if (offset < p->max && p->swap_map[offset]) {
1177 		if (p->swap_map[offset] < SWAP_MAP_MAX - 1) {
1178 			p->swap_map[offset]++;
1179 			result = 1;
1180 		} else if (p->swap_map[offset] <= SWAP_MAP_MAX) {
1181 			if (swap_overflow++ < 5)
1182 				printk(KERN_WARNING "swap_dup: swap entry overflow\n");
1183 			p->swap_map[offset] = SWAP_MAP_MAX;
1184 			result = 1;
1185 		}
1186 	}
1187 	swap_device_unlock(p);
1188 out:
1189 	return result;
1190 
1191 bad_file:
1192 	printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val);
1193 	goto out;
1194 }
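/*
 * Counting sketch: a slot in use has swap_map >= 1, and each successful
 * swap_duplicate() adds one reference, up to SWAP_MAP_MAX - 1.  Beyond
 * that the slot is clamped to SWAP_MAP_MAX and stays pinned until
 * swapoff.  The warning above is limited to five occurrences by the
 * swap_overflow counter, which try_to_unuse() resets after clearing
 * such an entry.
 */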
1195 
1196 /*
1197  * Prior swap_duplicate protects against swap device deletion.
1198  */
1199 void get_swaphandle_info(swp_entry_t entry, unsigned long *offset,
1200 			kdev_t *dev, struct inode **swapf)
1201 {
1202 	unsigned long type;
1203 	struct swap_info_struct *p;
1204 
1205 	type = SWP_TYPE(entry);
1206 	if (type >= nr_swapfiles) {
1207 		printk(KERN_ERR "rw_swap_page: %s%08lx\n", Bad_file, entry.val);
1208 		return;
1209 	}
1210 
1211 	p = &swap_info[type];
1212 	*offset = SWP_OFFSET(entry);
1213 	if (*offset >= p->max && *offset != 0) {
1214 		printk(KERN_ERR "rw_swap_page: %s%08lx\n", Bad_offset, entry.val);
1215 		return;
1216 	}
1217 	if (p->swap_map && !p->swap_map[*offset]) {
1218 		printk(KERN_ERR "rw_swap_page: %s%08lx\n", Unused_offset, entry.val);
1219 		return;
1220 	}
1221 	if (!(p->flags & SWP_USED)) {
1222 		printk(KERN_ERR "rw_swap_page: %s%08lx\n", Unused_file, entry.val);
1223 		return;
1224 	}
1225 
1226 	if (p->swap_device) {
1227 		*dev = p->swap_device;
1228 	} else if (p->swap_file) {
1229 		*swapf = p->swap_file->d_inode;
1230 	} else {
1231 		printk(KERN_ERR "rw_swap_page: no swap file or device\n");
1232 	}
1233 	return;
1234 }
1235 
1236 /*
1237  * swap_device_lock prevents swap_map being freed. Don't grab an extra
1238  * reference on the swaphandle, it doesn't matter if it becomes unused.
1239  */
1240 int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
1241 {
1242 	int ret = 0, i = 1 << page_cluster;
1243 	unsigned long toff;
1244 	struct swap_info_struct *swapdev = SWP_TYPE(entry) + swap_info;
1245 
1246 	if (!page_cluster)	/* no readahead */
1247 		return 0;
1248 	toff = (SWP_OFFSET(entry) >> page_cluster) << page_cluster;
1249 	if (!toff)		/* first page is swap header */
1250 		toff++, i--;
1251 	*offset = toff;
1252 
1253 	swap_device_lock(swapdev);
1254 	do {
1255 		/* Don't read-ahead past the end of the swap area */
1256 		if (toff >= swapdev->max)
1257 			break;
1258 		/* Don't read in free or bad pages */
1259 		if (!swapdev->swap_map[toff])
1260 			break;
1261 		if (swapdev->swap_map[toff] == SWAP_MAP_BAD)
1262 			break;
1263 		toff++;
1264 		ret++;
1265 	} while (--i);
1266 	swap_device_unlock(swapdev);
1267 	return ret;
1268 }
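/*
 * Readahead sketch: with page_cluster == 3, an entry at offset 21 maps
 * to the aligned window starting at *offset == 16, and the return value
 * is how many consecutive slots from there are in use (at most 8),
 * stopping early at a free slot, a SWAP_MAP_BAD slot, or the end of the
 * area.  Callers can then read that many pages around the faulting
 * entry in one go.
 */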
1269