/* sun3_pgalloc.h --
 * reorganization around 2.3.39, routines moved from sun3_pgtable.h
 *
 * moved 1/26/2000 Sam Creasey
 */

#ifndef _SUN3_PGALLOC_H
#define _SUN3_PGALLOC_H

/* Pagetable caches. */
//todo: should implement for at least ptes. --m
#define pgd_quicklist ((unsigned long *) 0)
#define pmd_quicklist ((unsigned long *) 0)
#define pte_quicklist ((unsigned long *) 0)
#define pgtable_cache_size (0L)

/* Allocation and deallocation of various flavours of pagetables. */
static inline int free_pmd_fast(pmd_t *pmdp) { return 0; }
static inline int free_pmd_slow(pmd_t *pmdp) { return 0; }
static inline pmd_t *get_pmd_fast(void) { return (pmd_t *) 0; }

//todo: implement the following properly.
#define get_pte_fast() ((pte_t *) 0)
#define get_pte_slow pte_alloc
#define free_pte_fast(pte)
#define free_pte_slow pte_free

/* FIXME - when we get this compiling */
/* erm, now that it's compiling, what do we do with it? */
#define _KERNPG_TABLE 0

static inline void pte_free_kernel(pte_t *pte)
{
	free_page((unsigned long) pte);
}

extern const char bad_pmd_string[];

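/* Return the kernel pte covering `address', allocating a fresh pte
   page if the pmd is still empty.  On allocation failure the pmd is
   pointed at BAD_PAGETABLE so stray references fault predictably. */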
static inline pte_t *pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	if (pmd_none(*pmd)) {
		pte_t *page = (pte_t *) get_free_page(GFP_KERNEL);
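		/* get_free_page(GFP_KERNEL) may sleep, so re-check that
		   nobody installed a pte table behind our back. */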
		if (pmd_none(*pmd)) {
			if (page) {
				pmd_val(*pmd) = _KERNPG_TABLE + __pa(page);
				return page + address;
			}
			pmd_val(*pmd) = _KERNPG_TABLE + __pa((unsigned long)BAD_PAGETABLE);
			return NULL;
		}
		free_page((unsigned long) page);
	}
	if (pmd_bad(*pmd)) {
		printk(bad_pmd_string, pmd_val(*pmd));
		printk("at kernel pgd off %08x\n", (unsigned int) pmd);
		pmd_val(*pmd) = _KERNPG_TABLE + __pa((unsigned long)BAD_PAGETABLE);
		return NULL;
	}
	return (pte_t *) __pmd_page(*pmd) + address;
}

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
static inline void pmd_free_kernel(pmd_t *pmd)
{
//	pmd_val(*pmd) = 0;
}

static inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
{
	return (pmd_t *) pgd;
}

#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address)      ({ BUG(); ((pmd_t *)2); })

static inline void pte_free(pte_t *pte)
{
	free_page((unsigned long) pte);
}

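/* Hand out a zeroed page to serve as a pte table; the caller wires it
   into the pmd with pmd_populate() below. */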
static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long page = __get_free_page(GFP_KERNEL);

	if (!page)
		return NULL;

	memset((void *) page, 0, PAGE_SIZE);
//	pmd_val(*pmd) = SUN3_PMD_MAGIC + __pa(page);
/*	pmd_val(*pmd) = __pa(page); */
	return (pte_t *) page;
}

#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr)

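/* Point the (one-entry) pmd at the physical address of the pte table. */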
#define pmd_populate(mm, pmd, pte) (pmd_val(*pmd) = __pa((unsigned long) pte))

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */
static inline void pmd_free(pmd_t *pmd)
{
	pmd_val(*pmd) = 0;
}

static inline void pgd_free(pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}

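/* New pgd: copy the kernel mappings from swapper_pg_dir, then zero
   the userspace entries below PAGE_OFFSET. */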
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd;

	new_pgd = (pgd_t *) get_free_page(GFP_KERNEL);
	if (!new_pgd)
		return NULL;
	memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
	memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT) * sizeof(pgd_t));
	return new_pgd;
}

#define pgd_populate(mm, pmd, pte) BUG()

/* FIXME: the sun3 doesn't have a page table cache!
   (but the motorola routine should just return 0) */

extern int do_check_pgt_cache(int, int);

static inline void set_pgdir(unsigned long address, pgd_t entry)
{
}

/* Reserved PMEGs. */
extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
extern unsigned long pmeg_vaddr[SUN3_PMEGS_NUM];
extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];

/* Flush all userspace mappings one by one...  (why no flush command,
   sun?) */
static inline void flush_tlb_all(void)
{
	unsigned long addr;
	unsigned char ctx, oldctx;

	oldctx = sun3_get_context();
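	/* Invalidate every user segment mapping in each of the 8
	   hardware contexts. */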
	for (addr = 0x00000000; addr < TASK_SIZE; addr += SUN3_PMEG_SIZE) {
		for (ctx = 0; ctx < 8; ctx++) {
			sun3_put_context(ctx);
			sun3_put_segmap(addr, SUN3_INVALID_PMEG);
		}
	}

	sun3_put_context(oldctx);
	/* erase all of the userspace pmeg maps, we've clobbered them
	   all anyway */
	for (addr = 0; addr < SUN3_INVALID_PMEG; addr++) {
		if (pmeg_alloc[addr] == 1) {
			pmeg_alloc[addr] = 0;
			pmeg_ctx[addr] = 0;
			pmeg_vaddr[addr] = 0;
		}
	}
}

/* Clear user TLB entries within the context named in mm */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned char oldctx;
	unsigned char seg;
	unsigned long i;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

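	/* Walk this context's segment map and release every PMEG found. */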
	for (i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE) {
		seg = sun3_get_segmap(i);
		if (seg == SUN3_INVALID_PMEG)
			continue;

		sun3_put_segmap(i, SUN3_INVALID_PMEG);
		pmeg_alloc[seg] = 0;
		pmeg_ctx[seg] = 0;
		pmeg_vaddr[seg] = 0;
	}

	sun3_put_context(oldctx);
}

/* Flush a single TLB page.  In this case, we're limited to flushing a
   single PMEG */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	unsigned char oldctx;
	unsigned char i;

	oldctx = sun3_get_context();
	sun3_put_context(vma->vm_mm->context);
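	/* Round addr down to the base of the PMEG that contains it. */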
	addr &= ~SUN3_PMEG_MASK;
	if ((i = sun3_get_segmap(addr)) != SUN3_INVALID_PMEG) {
		pmeg_alloc[i] = 0;
		pmeg_ctx[i] = 0;
		pmeg_vaddr[i] = 0;
		sun3_put_segmap(addr, SUN3_INVALID_PMEG);
	}
	sun3_put_context(oldctx);
}

/* Flush a range of pages from TLB. */
static inline void flush_tlb_range(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
{
	unsigned char seg, oldctx;

	start &= ~SUN3_PMEG_MASK;

	oldctx = sun3_get_context();
	sun3_put_context(mm->context);

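	/* Step through the range one PMEG at a time. */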
	while (start < end) {
		if ((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG)
			goto next;
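		/* Only reclaim the bookkeeping if this PMEG really belongs
		   to the context being flushed. */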
		if (pmeg_ctx[seg] == mm->context) {
			pmeg_alloc[seg] = 0;
			pmeg_ctx[seg] = 0;
			pmeg_vaddr[seg] = 0;
		}
		sun3_put_segmap(start, SUN3_INVALID_PMEG);
next:
		start += SUN3_PMEG_SIZE;
	}
}

/* Flush kernel page from TLB. */
static inline void flush_tlb_kernel_page(unsigned long addr)
{
	sun3_put_segmap(addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
}

static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}

#endif /* _SUN3_PGALLOC_H */