/*
 *  linux/arch/arm/mm/small_page.c
 *
 *  Copyright (C) 1996  Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Changelog:
 *   26/01/1996	RMK	Cleaned up various areas to make things a little more generic
 *   07/02/1999	RMK	Support added for 16K and 32K page sizes
 *			containing 8K blocks
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>

#include <asm/bitops.h>
#include <asm/pgtable.h>

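/*
 * PEDANTIC enables extra consistency checks: corrupt used-block bitmaps
 * or queue links trigger PAGE_BUG() instead of being silently ignored.
 */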
#define PEDANTIC

/*
 * Requirement:
 *  We need to be able to allocate naturally aligned memory of finer
 *  granularity than the page size.  This is typically used for the
 *  second level page tables on 32-bit ARMs.
 *
 * Theory:
 *  We "misuse" the Linux memory management system.  We use alloc_page
 *  to allocate a page and then mark it as reserved.  The Linux memory
 *  management system will then ignore the "offset", "next_hash" and
 *  "pprev_hash" entries in the mem_map for this page.
 *
 *  We then use a bitstring in the "offset" field to mark which segments
 *  of the page are in use, and manipulate this as required during the
 *  allocation and freeing of these small pages.
 *
 *  We also maintain a queue of pages being used for this purpose using
 *  the "next_hash" and "pprev_hash" entries of mem_map.
 */
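
/*
 * Worked example of the bitstring: with 8K blocks on a 32K page, a
 * used-map of 0x0b (binary 1011) means blocks 0, 1 and 3 are allocated
 * and block 2 is free, so ffz() returns 2 and the next allocation is
 * handed out at offset 2 << 13 within the page.
 */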

struct order {
	struct page *queue;
	unsigned int mask;		/* (1 << shift) - 1		*/
	unsigned int shift;		/* (1 << shift) size of small page */
	unsigned int block_mask;	/* nr_blocks - 1		*/
	unsigned int all_used;		/* (1 << nr_blocks) - 1		*/
};


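/*
 * One entry per supported small-page size.  With 4K pages there is a
 * single order of 2K blocks (two per page); with 32K pages there are
 * two orders, 2K blocks (16 per page) and 8K blocks (4 per page).
 */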
static struct order orders[] = {
#if PAGE_SIZE == 4096
	{ NULL, 2047, 11,  1, 0x00000003 }
#elif PAGE_SIZE == 32768
	{ NULL, 2047, 11, 15, 0x0000ffff },
	{ NULL, 8191, 13,  3, 0x0000000f }
#else
#error unsupported page size
#endif
};

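/*
 * The used-block bitmap for a container page lives in page->index;
 * one bit per block, set while that block is allocated.
 */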
#define USED_MAP(pg)			((pg)->index)
#define TEST_AND_CLEAR_USED(pg,off)	(test_and_clear_bit(off, &USED_MAP(pg)))
#define SET_USED(pg,off)		(set_bit(off, &USED_MAP(pg)))

static spinlock_t small_page_lock = SPIN_LOCK_UNLOCKED;

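/*
 * Each order keeps a queue of container pages that still have at least
 * one free block, threaded through the otherwise unused next_hash and
 * pprev_hash fields of struct page.
 */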
static void add_page_to_queue(struct page *page, struct page **p)
{
#ifdef PEDANTIC
	if (page->pprev_hash)
		PAGE_BUG(page);
#endif
	page->next_hash = *p;
	if (*p)
		(*p)->pprev_hash = &page->next_hash;
	*p = page;
	page->pprev_hash = p;
}

static void remove_page_from_queue(struct page *page)
{
	if (page->pprev_hash) {
		if (page->next_hash)
			page->next_hash->pprev_hash = page->pprev_hash;
		*page->pprev_hash = page->next_hash;
		page->pprev_hash = NULL;
	}
}

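/*
 * Allocate one block of the given order: take the first page on the
 * order's queue, claim its lowest free block, and unlink the page from
 * the queue once all of its blocks are in use.  If the queue is empty,
 * a fresh page is obtained from the page allocator and marked Reserved.
 */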
static unsigned long __get_small_page(int priority, struct order *order)
{
	unsigned long flags;
	struct page *page;
	int offset;

	if (!order->queue)
		goto need_new_page;

	spin_lock_irqsave(&small_page_lock, flags);
	page = order->queue;
again:
#ifdef PEDANTIC
	if (USED_MAP(page) & ~order->all_used)
		PAGE_BUG(page);
#endif
	offset = ffz(USED_MAP(page));
	SET_USED(page, offset);
	if (USED_MAP(page) == order->all_used)
		remove_page_from_queue(page);
	spin_unlock_irqrestore(&small_page_lock, flags);

	return (unsigned long) page_address(page) + (offset << order->shift);

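	/*
	 * No partially-used page was available.  alloc_page() may sleep,
	 * so it is called without the lock held; the queue is re-checked
	 * under the lock in case it was refilled in the meantime, and the
	 * freshly allocated page is given back if it is no longer needed.
	 */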
need_new_page:
	page = alloc_page(priority);

	spin_lock_irqsave(&small_page_lock, flags);
	if (!order->queue) {
		if (!page)
			goto no_page;
		SetPageReserved(page);
		USED_MAP(page) = 0;
		cli();
		add_page_to_queue(page, &order->queue);
	} else {
		if (page)
			__free_page(page);
		cli();
		page = order->queue;
	}
	goto again;

no_page:
	spin_unlock_irqrestore(&small_page_lock, flags);
	return 0;
}

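/*
 * Free one block back to its order.  A fully-used container page is
 * put back on the queue first; once its last block has been freed, the
 * page is unreserved and handed back to the page allocator.
 */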
static void __free_small_page(unsigned long spage, struct order *order)
{
	unsigned long flags;
	struct page *page;

	if (virt_addr_valid(spage)) {
		page = virt_to_page(spage);

		/*
		 * The container-page must be marked Reserved
		 */
		if (!PageReserved(page) || spage & order->mask)
			goto non_small;

#ifdef PEDANTIC
		if (USED_MAP(page) & ~order->all_used)
			PAGE_BUG(page);
#endif

		spage = spage >> order->shift;
		spage &= order->block_mask;

		/*
		 * the following must be atomic wrt get_page
		 */
		spin_lock_irqsave(&small_page_lock, flags);

		if (USED_MAP(page) == order->all_used)
			add_page_to_queue(page, &order->queue);

		if (!TEST_AND_CLEAR_USED(page, spage))
			goto already_free;

		if (USED_MAP(page) == 0)
			goto free_page;

		spin_unlock_irqrestore(&small_page_lock, flags);
	}
	return;

free_page:
	/*
	 * unlink the page from the small page queue and free it
	 */
	remove_page_from_queue(page);
	spin_unlock_irqrestore(&small_page_lock, flags);
	ClearPageReserved(page);
	__free_page(page);
	return;

non_small:
	printk("Trying to free a non-small page from %p\n", __builtin_return_address(0));
	return;
already_free:
	printk("Trying to free an already-free small page from %p\n", __builtin_return_address(0));
}

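/*
 * Accessors for 8K blocks.  orders+1 is the 8K order, which only
 * exists when PAGE_SIZE is 32K (see orders[] above).
 */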
unsigned long get_page_8k(int priority)
{
	return __get_small_page(priority, orders+1);
}

void free_page_8k(unsigned long spage)
{
	__free_small_page(spage, orders+1);
}
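
/*
 * Usage sketch (illustrative only, not called from this file): a caller
 * needing a naturally aligned 8K area might do
 *
 *	unsigned long addr = get_page_8k(GFP_KERNEL);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	free_page_8k(addr);
 *
 * where the priority argument is a gfp mask passed through to alloc_page().
 */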