1 #ifndef _LINUX_SWAP_H
2 #define _LINUX_SWAP_H
3 
4 #include <linux/spinlock.h>
5 #include <asm/page.h>
6 
/*
 * Flag bits for the swap_flags argument of sys_swapon() (declared
 * below): when SWAP_FLAG_PREFER is set, the low 15 bits carry the
 * requested swap priority.
 */
#define SWAP_FLAG_PREFER	0x8000	/* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK	0x7fff
#define SWAP_FLAG_PRIO_SHIFT	0

/* Maximum number of swap areas; sizes swap_info[] (see swapfile.c decls). */
#define MAX_SWAPFILES 32
12 
13 /*
14  * Magic header for a swap area. The first part of the union is
15  * what the swap magic looks like for the old (limited to 128MB)
16  * swap area format, the second part of the union adds - in the
17  * old reserved area - some extra information. Note that the first
18  * kilobyte is reserved for boot loader or disk label stuff...
19  *
20  * Having the magic at the end of the PAGE_SIZE makes detecting swap
21  * areas somewhat tricky on machines that support multiple page sizes.
22  * For 2.5 we'll probably want to move the magic to just beyond the
23  * bootbits...
24  */
union swap_header {
	/* Old-style view: magic string lives in the last 10 bytes of the page. */
	struct
	{
		char reserved[PAGE_SIZE - 10];
		char magic[10];			/* SWAP-SPACE or SWAPSPACE2 */
	} magic;
	/* New-style view, overlaid on the old format's reserved area. */
	struct
	{
		char	     bootbits[1024];	/* Space for disklabel etc. */
		unsigned int version;		/* swap area format version */
		unsigned int last_page;		/* last page of the swap area */
		unsigned int nr_badpages;	/* number of entries in badpages[] */
		unsigned int padding[125];
		unsigned int badpages[1];	/* bad-page list starts here (see MAX_SWAP_BADPAGES) */
	} info;
};
41 
42 #ifdef __KERNEL__
43 
44 /*
45  * Max bad pages in the new format..
46  */
47 #define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
48 #define MAX_SWAP_BADPAGES \
49 	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
50 
51 #include <asm/atomic.h>
52 
/* Values for swap_info_struct.flags (SWP_WRITEOK includes the SWP_USED bit). */
#define SWP_USED	1
#define SWP_WRITEOK	3

/* Upper bound on swap readahead/allocation clustering. */
#define SWAP_CLUSTER_MAX 32

/* swap_map[] entry values: the largest per-page count an entry can hold,
 * and the marker for an unusable (bad) page. */
#define SWAP_MAP_MAX	0x7fff
#define SWAP_MAP_BAD	0x8000
60 
61 /*
62  * The in-memory structure used to track swap areas.
63  */
/*
 * The in-memory structure used to track swap areas.
 */
struct swap_info_struct {
	unsigned int flags;		/* SWP_USED / SWP_WRITEOK */
	kdev_t swap_device;		/* device, for partition-backed swap */
	spinlock_t sdev_lock;		/* taken via swap_device_lock() below */
	struct dentry * swap_file;	/* backing file/partition dentry */
	struct vfsmount *swap_vfsmnt;
	unsigned short * swap_map;	/* per-page counts (SWAP_MAP_MAX/SWAP_MAP_BAD) */
	unsigned int lowest_bit;	/* lower search bound in swap_map — assumed, confirm in swapfile.c */
	unsigned int highest_bit;	/* upper search bound in swap_map — assumed, confirm in swapfile.c */
	unsigned int cluster_next;	/* next offset to try for clustered allocation */
	unsigned int cluster_nr;	/* allocations left in current cluster */
	int prio;			/* swap priority */
	int pages;			/* usable page count */
	unsigned long max;		/* number of swap_map entries */
	int next;			/* next entry on swap list */
};
80 
/* Free swap pages currently available, summed over all areas. */
extern int nr_swap_pages;

/* Swap 50% full? Release swapcache more aggressively.. */
#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)

/* Global memory accounting, defined in mm/. */
extern unsigned int nr_free_pages(void);
extern unsigned int nr_free_buffer_pages(void);
extern unsigned int freeable_lowmem(void);
extern int nr_active_pages;
extern int nr_inactive_pages;
extern unsigned long page_cache_size;
extern atomic_t buffermem_pages;

/* Page cache lock, padded to its own cacheline (see spinlock_cacheline_t). */
extern spinlock_cacheline_t pagecache_lock_cacheline;
#define pagecache_lock (pagecache_lock_cacheline.lock)
96 
/* Low-level page cache removal; NOTE(review): the __ prefix suggests the
 * caller must already hold pagecache_lock — confirm at the definition. */
extern void __remove_inode_page(struct page *);

/* Incomplete types for prototype declarations: */
struct task_struct;
struct vm_area_struct;
struct sysinfo;

struct zone_t;
105 
/* linux/mm/swap.c */
extern void FASTCALL(lru_cache_add(struct page *));
extern void FASTCALL(__lru_cache_del(struct page *));
extern void FASTCALL(lru_cache_del(struct page *));

extern void FASTCALL(activate_page(struct page *));

extern void swap_setup(void);

/* linux/mm/vmscan.c */
extern wait_queue_head_t kswapd_wait;
extern int FASTCALL(try_to_free_pages_zone(zone_t *, unsigned int));
extern int FASTCALL(try_to_free_pages(unsigned int));
/* VM behaviour tunables — NOTE(review): presumably exposed via sysctl. */
extern int vm_vfs_scan_ratio, vm_cache_scan_ratio, vm_lru_balance_ratio, vm_passes, vm_gfp_debug, vm_mapped_ratio, vm_anon_lru;

/* linux/mm/page_io.c */
extern void rw_swap_page(int, struct page *);
extern void rw_swap_page_nolock(int, swp_entry_t, char *);
124 
125 /* linux/mm/page_alloc.c */
126 
/* linux/mm/swap_state.c */
#define SWAP_CACHE_INFO		/* compile in swap-cache statistics reporting */
#ifdef SWAP_CACHE_INFO
extern void show_swap_cache_info(void);
#endif
extern int add_to_swap_cache(struct page *, swp_entry_t);
extern void __delete_from_swap_cache(struct page *page);
extern void delete_from_swap_cache(struct page *page);
extern void free_page_and_swap_cache(struct page *page);
extern struct page * lookup_swap_cache(swp_entry_t);
extern struct page * read_swap_cache_async(swp_entry_t);
138 
/* linux/mm/oom_kill.c */
extern void out_of_memory(void);

/* linux/mm/swapfile.c */
extern int total_swap_pages;		/* usable pages summed over all areas */
extern unsigned int nr_swapfiles;
extern struct swap_info_struct swap_info[];	/* MAX_SWAPFILES entries */
extern int is_swap_partition(kdev_t);
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern void get_swaphandle_info(swp_entry_t, unsigned long *, kdev_t *,
					struct inode **);
extern int swap_duplicate(swp_entry_t);
extern int valid_swaphandles(swp_entry_t, unsigned long *);
extern void swap_free(swp_entry_t);
extern void free_swap_and_cache(swp_entry_t);
/*
 * Head of the priority-ordered list of swap areas.  Both fields are
 * indices into swap_info[]; areas chain via swap_info_struct.next.
 */
struct swap_list_t {
	int head;	/* head of priority-ordered swapfile list */
	int next;	/* swapfile to be used next */
};
extern struct swap_list_t swap_list;
asmlinkage long sys_swapoff(const char *);
asmlinkage long sys_swapon(const char *, int);
162 
163 extern spinlock_cacheline_t pagemap_lru_lock_cacheline;
164 #define pagemap_lru_lock pagemap_lru_lock_cacheline.lock
165 
166 extern void FASTCALL(mark_page_accessed(struct page *));
167 
168 /*
169  * List add/del helper macros. These must be called
170  * with the pagemap_lru_lock held!
171  */
172 #define DEBUG_LRU_PAGE(page)			\
173 do {						\
174 	if (!PageLRU(page))			\
175 		BUG();				\
176 	if (PageActive(page))			\
177 		BUG();				\
178 } while (0)
179 
/* Adjust the active-page count by delta; the page argument presumably
 * lets the callee attribute the change per-zone — confirm in mm/. */
extern void delta_nr_active_pages(struct page *page, long delta);
#define inc_nr_active_pages(page) delta_nr_active_pages(page, 1)
#define dec_nr_active_pages(page) delta_nr_active_pages(page, -1)

/* Same as above, for the inactive-page count. */
extern void delta_nr_inactive_pages(struct page *page, long delta);
#define inc_nr_inactive_pages(page) delta_nr_inactive_pages(page, 1)
#define dec_nr_inactive_pages(page) delta_nr_inactive_pages(page, -1)
187 
/* Put a page on the active list: marks it PageActive and bumps the
 * active-page accounting.  DEBUG_LRU_PAGE enforces the preconditions. */
#define add_page_to_active_list(page)		\
do {						\
	DEBUG_LRU_PAGE(page);			\
	SetPageActive(page);			\
	list_add(&(page)->lru, &active_list);	\
	inc_nr_active_pages(page);		\
} while (0)

/* Put a page on the inactive list; no PageActive bit is set. */
#define add_page_to_inactive_list(page)		\
do {						\
	DEBUG_LRU_PAGE(page);			\
	list_add(&(page)->lru, &inactive_list);	\
	inc_nr_inactive_pages(page);		\
} while (0)

/* Unlink from the active list, clearing PageActive and the accounting. */
#define del_page_from_active_list(page)		\
do {						\
	list_del(&(page)->lru);			\
	ClearPageActive(page);			\
	dec_nr_active_pages(page);		\
} while (0)

/* Unlink from the inactive list and drop the accounting. */
#define del_page_from_inactive_list(page)	\
do {						\
	list_del(&(page)->lru);			\
	dec_nr_inactive_pages(page);		\
} while (0)
215 
/* Adjust the page-cache page count by delta (same pattern as the
 * active/inactive helpers above). */
extern void delta_nr_cache_pages(struct page *page, long delta);
#define inc_nr_cache_pages(page) delta_nr_cache_pages(page, 1)
#define dec_nr_cache_pages(page) delta_nr_cache_pages(page, -1)
219 
220 extern spinlock_t swaplock;
221 
222 #define swap_list_lock()	spin_lock(&swaplock)
223 #define swap_list_unlock()	spin_unlock(&swaplock)
224 #define swap_device_lock(p)	spin_lock(&p->sdev_lock)
225 #define swap_device_unlock(p)	spin_unlock(&p->sdev_lock)
226 
227 extern int shmem_unuse(swp_entry_t entry, struct page *page);
228 
229 #endif /* __KERNEL__*/
230 
231 #endif /* _LINUX_SWAP_H */
232