#ifndef _MOTOROLA_PGALLOC_H
#define _MOTOROLA_PGALLOC_H

extern struct pgtable_cache_struct {
	unsigned long *pmd_cache;
	unsigned long *pte_cache;
/* This counts in units of pointer tables, of which there can be eight per page. */
	unsigned long pgtable_cache_sz;
} quicklists;

#define pgd_quicklist ((unsigned long *)0)
#define pmd_quicklist (quicklists.pmd_cache)
#define pte_quicklist (quicklists.pte_cache)
/* This isn't accurate because of fragmentation of allocated pages for
   pointer tables, but that should not be a problem. */
#define pgtable_cache_size ((quicklists.pgtable_cache_sz+7)/8)

extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset);
extern pmd_t *get_pmd_slow(pgd_t *pgd, unsigned long offset);

extern pmd_t *get_pointer_table(void);
extern int free_pointer_table(pmd_t *);


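/*
 * Flush the supervisor-space ATC entry for a kernel page.  On the
 * '040/'060 the pflush below operates on the address space selected by
 * the DFC register, which set_fs() loads on m68k -- hence the temporary
 * switch to KERNEL_DS.
 */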
static inline void flush_tlb_kernel_page(unsigned long addr)
{
	if (CPU_IS_040_OR_060) {
		mm_segment_t old_fs = get_fs();
		set_fs(KERNEL_DS);
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflush (%0)\n\t"
				     ".chip 68k"
				     : : "a" (addr));
		set_fs(old_fs);
	} else
		__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
}


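/*
 * The quicklists are singly-linked free lists threaded through the
 * cached tables themselves: the first long of each entry holds the link
 * to the next one and is cleared again on allocation.
 */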
static inline pte_t *get_pte_fast(void)
{
	unsigned long *ret;

	ret = pte_quicklist;
	if (ret) {
		pte_quicklist = (unsigned long *)*ret;
		ret[0] = 0;
		quicklists.pgtable_cache_sz -= 8;
	}
	return (pte_t *)ret;
}
#define pte_alloc_one_fast(mm,addr)  get_pte_fast()

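/*
 * A full pte page counts as eight units of pgtable_cache_sz.  The fresh
 * page is pushed out of the data cache and mapped non-cacheable because
 * the '040/'060 table-walk hardware reads and updates descriptors
 * directly in memory, so page tables must not live in the copyback cache.
 */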
static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	pte = (pte_t *)__get_free_page(GFP_KERNEL);
	if (pte) {
		clear_page(pte);
		__flush_page_to_ram((unsigned long)pte);
		flush_tlb_kernel_page((unsigned long)pte);
		nocache_page((unsigned long)pte);
	}

	return pte;
}


static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return get_pointer_table();
}

static inline void free_pte_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long)pte_quicklist;
	pte_quicklist = (unsigned long *)pte;
	quicklists.pgtable_cache_sz += 8;
}

static inline void free_pte_slow(pte_t *pte)
{
	cache_page((unsigned long)pte);
	free_page((unsigned long)pte);
}

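/*
 * Pointer (pmd) tables come from get_pointer_table(), which packs eight
 * of them into a page, so each one counts as a single unit of
 * pgtable_cache_sz -- versus eight for a whole pte page above.
 */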
static inline pmd_t *get_pmd_fast(void)
{
	unsigned long *ret;

	ret = pmd_quicklist;
	if (ret) {
		pmd_quicklist = (unsigned long *)*ret;
		ret[0] = 0;
		quicklists.pgtable_cache_sz--;
	}
	return (pmd_t *)ret;
}
#define pmd_alloc_one_fast(mm,addr) get_pmd_fast()

static inline void free_pmd_fast(pmd_t *pmd)
{
	*(unsigned long *)pmd = (unsigned long)pmd_quicklist;
	pmd_quicklist = (unsigned long *)pmd;
	quicklists.pgtable_cache_sz++;
}

static inline int free_pmd_slow(pmd_t *pmd)
{
	return free_pointer_table(pmd);
}

/* The pgd cache is folded into the pmd cache, so these are dummy routines. */
static inline pgd_t *get_pgd_fast(void)
{
	return (pgd_t *)0;
}

static inline void free_pgd_fast(pgd_t *pgd)
{
}

static inline void free_pgd_slow(pgd_t *pgd)
{
}

extern void __bad_pte(pmd_t *pmd);
extern void __bad_pmd(pgd_t *pgd);

static inline void pte_free(pte_t *pte)
{
	free_pte_fast(pte);
}

static inline void pmd_free(pmd_t *pmd)
{
	free_pmd_fast(pmd);
}


static inline void pte_free_kernel(pte_t *pte)
{
	free_pte_fast(pte);
}

static inline pte_t *pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	return pte_alloc(&init_mm, pmd, address);
}

static inline void pmd_free_kernel(pmd_t *pmd)
{
	free_pmd_fast(pmd);
}

static inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
	return pmd_alloc(&init_mm, pgd, address);
}

static inline void pgd_free(pgd_t *pgd)
{
	free_pmd_fast((pmd_t *)pgd);
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)get_pmd_fast();
	if (!pgd)
		pgd = (pgd_t *)get_pointer_table();
	return pgd;
}


#define pmd_populate(MM, PMD, PTE)	pmd_set(PMD, PTE)
#define pgd_populate(MM, PGD, PMD)	pgd_set(PGD, PMD)
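
/*
 * Rough sketch of how the generic mm code is expected to use these
 * (illustration only, not a definition from this header):
 *
 *	pmd_t *pmd = pmd_alloc_one_fast(mm, addr);
 *	if (!pmd)
 *		pmd = pmd_alloc_one(mm, addr);
 *	if (pmd)
 *		pgd_populate(mm, pgd, pmd);
 */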


extern int do_check_pgt_cache(int, int);

static inline void set_pgdir(unsigned long address, pgd_t entry)
{
}


/*
 * flush all user-space atc entries.
 */
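/*
 * The '030 "pflush #<fc>,#<mask>" forms used here select entries by
 * function code: with mask #4 only FC2 is tested, so fc #0 matches
 * user-space entries and fc #4 (as in flush_tlb_kernel_page() above)
 * matches supervisor-space ones.  The '040/'060 use pflushan/pflusha
 * instead.
 */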
static inline void __flush_tlb(void)
{
	if (CPU_IS_040_OR_060)
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflushan\n\t"
				     ".chip 68k");
	else
		__asm__ __volatile__("pflush #0,#4");
}

static inline void __flush_tlb040_one(unsigned long addr)
{
	__asm__ __volatile__(".chip 68040\n\t"
			     "pflush (%0)\n\t"
			     ".chip 68k"
			     : : "a" (addr));
}

static inline void __flush_tlb_one(unsigned long addr)
{
	if (CPU_IS_040_OR_060)
		__flush_tlb040_one(addr);
	else
		__asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
}

#define flush_tlb() __flush_tlb()

/*
 * flush all atc entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
	if (CPU_IS_040_OR_060)
		__asm__ __volatile__(".chip 68040\n\t"
				     "pflusha\n\t"
				     ".chip 68k");
	else
		__asm__ __volatile__("pflusha");
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

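/*
 * As in flush_tlb_kernel_page(), set_fs() selects the address space the
 * '040/'060 pflush acts on -- USER_DS here, so it is the user-space
 * entry for addr that gets flushed.
 */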
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	if (vma->vm_mm == current->active_mm) {
		mm_segment_t old_fs = get_fs();
		set_fs(USER_DS);
		__flush_tlb_one(addr);
		set_fs(old_fs);
	}
}

static inline void flush_tlb_range(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	if (mm == current->active_mm)
		__flush_tlb();
}


static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}

#endif /* _MOTOROLA_PGALLOC_H */