/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2001 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/fixmap.h>

/* TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLB entries
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes a single page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *  - flush_tlb_one(page) flushes a single kernel page
 */
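
/*
 * A sketch of typical use, assuming a caller that has just rewritten
 * ptes for a user range (the vma, ptep, start and end here are
 * hypothetical, not defined in this file):
 *
 *	set_pte(ptep, mk_pte(page, prot));
 *	flush_tlb_range(vma->vm_mm, start, end);
 *
 * Note that in this kernel generation flush_tlb_range() takes the mm,
 * not the vma; a single user page is flushed with
 * flush_tlb_page(vma, address).
 */
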
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
	unsigned long end);
extern void local_flush_tlb_page(struct vm_area_struct *vma,
	unsigned long page);
extern void local_flush_tlb_one(unsigned long page);

#ifdef CONFIG_SMP

extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_range(struct mm_struct *, unsigned long, unsigned long);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);

#else /* CONFIG_SMP */

#define flush_tlb_all()			local_flush_tlb_all()
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_range(mm, start, end)	local_flush_tlb_range(mm, start, end)
#define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)

#endif /* CONFIG_SMP */
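
/*
 * On SMP the flush_tlb_* operations above are real functions that
 * broadcast the flush to the other cpus (presumably via IPIs, as in
 * other 2.4 SMP ports); on UP they collapse into the local_* versions.
 */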

static inline void flush_tlb_pgtables(struct mm_struct *mm,
	unsigned long start, unsigned long end)
{
	/* Nothing to do on MIPS.  */
}

/*
 * Allocate and free page tables.
 */

#define pgd_quicklist		(current_cpu_data.pgd_quick)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(current_cpu_data.pte_quick)
#define pgtable_cache_size	(current_cpu_data.pgtable_cache_sz)
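
/*
 * The quicklists above are simple per-cpu free lists of page table
 * pages: a free page is chained into its list through its first word,
 * and pgtable_cache_size counts how many pages are currently cached.
 * For pgd pages the freelist link clobbers entry 0; since pgd_init()
 * fills every user entry with the same invalid-pte pointer, the
 * allocator can repair entry 0 simply by copying entry 1 over it
 * (see get_pgd_fast() below).
 */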

#define pmd_populate(mm, pmd, pte)	pmd_set(pmd, pte)

/*
 * Initialize new page directory with pointers to invalid ptes
 */
extern void pgd_init(unsigned long page);

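/*
 * Slow path pgd allocation: grab fresh pages, let pgd_init() point all
 * user entries at the invalid pte table, then copy the kernel entries
 * (everything from USER_PTRS_PER_PGD up) from init_mm so that kernel
 * mappings are shared by every process.
 */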
static __inline__ pgd_t *get_pgd_slow(void)
{
	pgd_t *ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER), *init;

	if (ret) {
		init = pgd_offset(&init_mm, 0);
		pgd_init((unsigned long)ret);
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return ret;
}

static __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		/* restore entry 0, which held the freelist link */
		ret[0] = ret[1];
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}

static __inline__ void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

static __inline__ void free_pgd_slow(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ORDER);
}

static __inline__ pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

static __inline__ void free_pte_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

static __inline__ void free_pte_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

/* We don't use a pmd cache, so these are dummy routines */
static __inline__ pmd_t *get_pmd_fast(void)
{
	return (pmd_t *)0;
}

static __inline__ void free_pmd_fast(pmd_t *pmd)
{
}

static __inline__ void free_pmd_slow(pmd_t *pmd)
{
}

extern void __bad_pte(pmd_t *pmd);

static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_page(GFP_KERNEL);
	if (pte)
		clear_page(pte);
	return pte;
}
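
/*
 * pte_alloc_one() above always goes to the page allocator and zeroes
 * the fresh page; pte_alloc_one_fast() below is the quicklist front
 * end, returning NULL when the list is empty so that (presumably, as
 * in other 2.4 ports) the generic code falls back to pte_alloc_one().
 */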

static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

static __inline__ void pte_free_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

static __inline__ void pte_free_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

#define pte_free(pte)		pte_free_fast(pte)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc(mm)		get_pgd_fast()

/*
 * Allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so it has no extra memory associated with it.
 */
#define pmd_alloc_one_fast(mm, addr)	({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()
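
/*
 * The pmd macros above must still exist for the generic code to
 * compile, but calling them is a bug on a two-level MIPS page table.
 * The (pmd_t *)1 and (pmd_t *)2 return values are, presumably, just
 * distinct non-NULL poison values so that any use after the BUG()
 * faults recognizably.
 */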

extern int do_check_pgt_cache(int, int);
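
/*
 * do_check_pgt_cache(low, high) trims the quicklists: as in other 2.4
 * ports, it is expected to free cached page table pages until
 * pgtable_cache_size drops back toward the low watermark once it has
 * grown past the high one, returning the number of pages freed.
 */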

#endif /* _ASM_PGALLOC_H */