/*
 * linux/mm/page_io.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Swap reorganised 29.12.95,
 * Asynchronous swapping added 30.12.95. Stephen Tweedie
 * Removed race in async swapping. 14.4.1996. Bruno Haible
 * Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 * Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/locks.h>
#include <linux/swapctl.h>

#include <asm/pgtable.h>

/*
 * Reads or writes a swap page.
 * The I/O is always started asynchronously here; on completion the
 * block layer's completion handler unlocks the page.
 *
 * Important prevention of race condition: the caller *must* atomically
 * create a unique swap cache entry for this swap page before calling
 * rw_swap_page, and must lock that page.  By ensuring that there is a
 * single page of memory reserved for the swap entry, the normal VM page
 * lock on that page also doubles as a lock on swap entries.  Having only
 * one lock to deal with per swap entry (rather than locking swap and memory
 * independently) also makes it easier to make certain swapping operations
 * atomic, which is particularly important when we are trying to ensure
 * that shared pages stay shared while being swapped.
 */
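
/*
 * Illustrative sketch (hypothetical, not part of this file): a swapin
 * path honouring the protocol above, assuming the 2.4-era helpers
 * alloc_page() and add_to_swap_cache().  add_to_swap_cache() installs
 * the page in the swap cache and leaves it locked, so the page lock
 * is held across the I/O exactly as required.
 */
#if 0
static struct page *example_swapin(swp_entry_t entry)
{
	struct page *page = alloc_page(GFP_HIGHUSER);

	if (!page)
		return NULL;
	add_to_swap_cache(page, entry);	/* page comes back locked */
	rw_swap_page(READ, page);	/* unlocked when the I/O completes */
	return page;
}
#endif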

static int rw_swap_page_base(int rw, swp_entry_t entry, struct page *page)
{
	unsigned long offset;
	int zones[PAGE_SIZE/512];
	int zones_used;
	kdev_t dev = 0;
	int block_size;
	struct inode *swapf = 0;

	if (rw == READ) {
		ClearPageUptodate(page);
		kstat.pswpin++;
	} else
		kstat.pswpout++;

	get_swaphandle_info(entry, &offset, &dev, &swapf);
	if (dev) {
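		/*
		 * Swap partition: the slot maps to a single
		 * PAGE_SIZE-sized block at 'offset' on the device.
		 */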
		zones[0] = offset;
		zones_used = 1;
		block_size = PAGE_SIZE;
	} else if (swapf) {
		int i, j;
		unsigned int block = offset
			<< (PAGE_SHIFT - swapf->i_sb->s_blocksize_bits);

		block_size = swapf->i_sb->s_blocksize;
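		/*
		 * Swap file: translate the page-sized slot into the
		 * filesystem blocks backing it via bmap().  With 4096-byte
		 * pages and a 1024-byte blocksize, for example, each slot
		 * covers PAGE_SIZE/block_size == 4 consecutive blocks.
		 */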
		for (i = 0, j = 0; j < PAGE_SIZE; i++, j += block_size)
			if (!(zones[i] = bmap(swapf, block++))) {
				printk("rw_swap_page: bad swap file\n");
				return 0;
			}
		zones_used = i;
		dev = swapf->i_dev;
	} else {
		return 0;
	}

	/* block_size == PAGE_SIZE/zones_used */
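	/*
	 * brw_page() starts the transfer asynchronously; once all
	 * zones_used buffers complete, the completion handler marks
	 * the page up to date (on a successful read) and unlocks it.
	 */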
	brw_page(rw, page, dev, zones, block_size);
	return 1;
}

/*
 * A simple wrapper so the base function doesn't need to enforce
 * that all swap pages go through the swap cache!  We verify that:
 * - the page is locked
 * - it's marked as being swap-cache
 * - it's associated with the swap inode
 */
void rw_swap_page(int rw, struct page *page)
{
	swp_entry_t entry;

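	/* Swap-cache pages encode their swap entry in page->index. */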
	entry.val = page->index;

	if (!PageLocked(page))
		PAGE_BUG(page);
	if (!PageSwapCache(page))
		PAGE_BUG(page);
	if (!rw_swap_page_base(rw, entry, page))
		UnlockPage(page);
}

/*
 * The swap lock map insists that pages be in the page cache!
 * Therefore we can't use it.  Later, when we can remove the need for
 * the lock map, we can reduce the number of functions exported.
 */
void rw_swap_page_nolock(int rw, swp_entry_t entry, char *buf)
{
	struct page *page = virt_to_page(buf);

	if (!PageLocked(page))
		PAGE_BUG(page);
	if (page->mapping)
		PAGE_BUG(page);
	/* needs sync_page to wait for I/O completion */
	page->mapping = &swapper_space;
	if (rw_swap_page_base(rw, entry, page))
		lock_page(page);
	if (!block_flushpage(page, 0))
		PAGE_BUG(page);
	page->mapping = NULL;
	UnlockPage(page);
}
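
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * reading a raw swap page, such as the swap header during swapon(),
 * into a plain kernel buffer before any swap cache exists.  The
 * caller must own the page behind 'buf' and lock it first, as the
 * checks above demand.
 */
#if 0
static void example_read_raw_swap_page(swp_entry_t entry, char *buf)
{
	lock_page(virt_to_page(buf));		/* satisfy PageLocked() */
	rw_swap_page_nolock(READ, entry, buf);	/* returns with the page unlocked */
}
#endif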