/*
 * linux/mm/swap.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * linux/Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/pagemap.h>
#include <linux/init.h>

#include <asm/dma.h>
#include <asm/uaccess.h> /* for copy_to/from_user */
#include <asm/pgtable.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

pager_daemon_t pager_daemon = {
	512,			/* base number for calculating the number of tries */
	SWAP_CLUSTER_MAX,	/* minimum number of tries */
	8,			/* do swap I/O in clusters of this size */
};
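
/*
 * Illustrative sketch, not part of the original file: the tunables above
 * bound how aggressively the page-out code works.  A page-out loop that
 * honours pager_daemon.swap_cluster as its I/O batch size might look
 * roughly like the following; more_work() and pageout_one() are
 * hypothetical helpers used only to show where swap_cluster fits.
 *
 *	int batch = pager_daemon.swap_cluster;
 *
 *	while (batch-- && more_work())
 *		pageout_one();
 */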

/*
 * Move an inactive page to the active list.
 */
static inline void activate_page_nolock(struct page * page)
{
	if (PageLRU(page) && !PageActive(page)) {
		del_page_from_inactive_list(page);
		add_page_to_active_list(page);
	}
}

void fastcall activate_page(struct page * page)
{
	spin_lock(&pagemap_lru_lock);
	activate_page_nolock(page);
	spin_unlock(&pagemap_lru_lock);
}
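
/*
 * Usage sketch, not part of the original file: a hypothetical caller
 * that decides a page on the inactive list is hot again can promote it
 * with activate_page(), which takes pagemap_lru_lock itself.
 * page_is_hot() below is a made-up predicate standing in for whatever
 * policy the caller applies.
 *
 *	if (page_is_hot(page))
 *		activate_page(page);
 */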

/**
 * lru_cache_add: add a page to the page lists
 * @page: the page to add
 */
void fastcall lru_cache_add(struct page * page)
{
	if (!PageLRU(page)) {
		spin_lock(&pagemap_lru_lock);
		if (!TestSetPageLRU(page))
			add_page_to_inactive_list(page);
		spin_unlock(&pagemap_lru_lock);
	}
}
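
/*
 * Usage sketch, not part of the original file: a freshly allocated page
 * typically enters the VM's page lists through this helper once the
 * caller has made it visible (for example after inserting it into the
 * page cache).  insert_into_cache() is a hypothetical stand-in for the
 * caller's own bookkeeping.
 *
 *	struct page *page = alloc_page(GFP_HIGHUSER);
 *
 *	if (page && !insert_into_cache(page))
 *		lru_cache_add(page);
 */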

/**
 * __lru_cache_del: remove a page from the page lists
 * @page: the page to remove
 *
 * This function is for when the caller already holds
 * the pagemap_lru_lock.
 */
void fastcall __lru_cache_del(struct page * page)
{
	if (TestClearPageLRU(page)) {
		if (PageActive(page)) {
			del_page_from_active_list(page);
		} else {
			del_page_from_inactive_list(page);
		}
	}
}

/**
 * lru_cache_del: remove a page from the page lists
 * @page: the page to remove
 */
void fastcall lru_cache_del(struct page * page)
{
	spin_lock(&pagemap_lru_lock);
	__lru_cache_del(page);
	spin_unlock(&pagemap_lru_lock);
}
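
/*
 * Usage sketch, not part of the original file: lru_cache_del() takes
 * pagemap_lru_lock itself, while __lru_cache_del() is for callers that
 * already hold it.
 *
 *	lru_cache_del(page);
 *
 *	spin_lock(&pagemap_lru_lock);
 *	__lru_cache_del(page);
 *	spin_unlock(&pagemap_lru_lock);
 */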

/**
 * delta_nr_active_pages: alter the number of active pages.
 *
 * @page: the page which is being activated/deactivated
 * @delta: +1 for activation, -1 for deactivation
 *
 * Called under pagecache_lock
 */
void delta_nr_active_pages(struct page *page, long delta)
{
	pg_data_t *pgdat;
	zone_t *classzone, *overflow;

	classzone = page_zone(page);
	pgdat = classzone->zone_pgdat;
	overflow = pgdat->node_zones + pgdat->nr_zones;

	while (classzone < overflow) {
		classzone->nr_active_pages += delta;
		classzone++;
	}
	nr_active_pages += delta;
}
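
/*
 * Worked example, not part of the original file: the walk above bumps
 * the counter of the page's own zone and of every higher zone on the
 * node, so each zone's count covers its whole classzone (the zone plus
 * all lower zones).  On a node with DMA, NORMAL and HIGHMEM:
 *
 *	page in ZONE_DMA:	DMA, NORMAL and HIGHMEM counters change
 *	page in ZONE_NORMAL:	NORMAL and HIGHMEM counters change
 *	page in ZONE_HIGHMEM:	only the HIGHMEM counter changes
 *
 * The same walk is used by delta_nr_inactive_pages() and
 * delta_nr_cache_pages() below.
 */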

/**
 * delta_nr_inactive_pages: alter the number of inactive pages.
 *
 * @page: the page which is being deactivated/activated
 * @delta: +1 for deactivation, -1 for activation
 *
 * Called under pagecache_lock
 */
void delta_nr_inactive_pages(struct page *page, long delta)
{
	pg_data_t *pgdat;
	zone_t *classzone, *overflow;

	classzone = page_zone(page);
	pgdat = classzone->zone_pgdat;
	overflow = pgdat->node_zones + pgdat->nr_zones;

	while (classzone < overflow) {
		classzone->nr_inactive_pages += delta;
		classzone++;
	}
	nr_inactive_pages += delta;
}

/**
 * delta_nr_cache_pages: alter the number of pages in the pagecache
 *
 * @page: the page which is being added/removed
 * @delta: +1 for addition, -1 for removal
 *
 * Called under pagecache_lock
 */
void delta_nr_cache_pages(struct page *page, long delta)
{
	pg_data_t *pgdat;
	zone_t *classzone, *overflow;

	classzone = page_zone(page);
	pgdat = classzone->zone_pgdat;
	overflow = pgdat->node_zones + pgdat->nr_zones;

	while (classzone < overflow) {
		classzone->nr_cache_pages += delta;
		classzone++;
	}
	page_cache_size += delta;
}

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = num_physpages >> (20 - PAGE_SHIFT);

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}
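
/*
 * Worked example, not part of the original file, assuming 4 KiB pages
 * (PAGE_SHIFT == 12): num_physpages >> (20 - 12) divides the number of
 * physical pages by 256, i.e. converts pages to megabytes.  A 64 MB
 * machine therefore gets page_cluster = 3; since page_cluster is used
 * elsewhere in the VM as a power-of-two shift, that means clusters of
 * up to 1 << 3 = 8 pages (32 KiB), while a machine below 16 MB works
 * with 1 << 2 = 4 pages (16 KiB) at a time.
 */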