/* $Id: pgtable.h,v 1.109 2001/11/13 00:49:32 davem Exp $ */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/*  asm-sparc/pgtable.h:  Defines and functions used to work
 *                        with Sparc page tables.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/config.h>
#include <linux/spinlock.h>
#include <asm/asi.h>
#ifdef CONFIG_SUN4
#include <asm/pgtsun4.h>
#else
#include <asm/pgtsun4c.h>
#endif
#include <asm/pgtsrmmu.h>
#include <asm/vac-ops.h>
#include <asm/oplib.h>
#include <asm/sbus.h>
#include <asm/btfixup.h>
#include <asm/system.h>

#ifndef __ASSEMBLY__

extern void load_mmu(void);
extern unsigned long calc_highpages(void);

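/* Almost everything below is routed through btfixup: the BTFIXUPDEF_*
 * macros declare per-MMU entry points and constants, and at boot time,
 * once we know whether we are running on a sun4c or an SRMMU (sun4m/sun4d)
 * style MMU, the call sites and immediates are patched in place so these
 * hot paths avoid indirect calls.
 */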
BTFIXUPDEF_CALL(void, quick_kernel_fault, unsigned long)

#define quick_kernel_fault(addr) BTFIXUP_CALL(quick_kernel_fault)(addr)

/* Routines for data transfer buffers. */
BTFIXUPDEF_CALL(char *, mmu_lockarea, char *, unsigned long)
BTFIXUPDEF_CALL(void,   mmu_unlockarea, char *, unsigned long)

#define mmu_lockarea(vaddr,len) BTFIXUP_CALL(mmu_lockarea)(vaddr,len)
#define mmu_unlockarea(vaddr,len) BTFIXUP_CALL(mmu_unlockarea)(vaddr,len)

/* These are implementations for sbus_map_sg/sbus_unmap_sg... collapse later */
BTFIXUPDEF_CALL(__u32, mmu_get_scsi_one, char *, unsigned long, struct sbus_bus *sbus)
BTFIXUPDEF_CALL(void,  mmu_get_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)
BTFIXUPDEF_CALL(void,  mmu_release_scsi_one, __u32, unsigned long, struct sbus_bus *sbus)
BTFIXUPDEF_CALL(void,  mmu_release_scsi_sgl, struct scatterlist *, int, struct sbus_bus *sbus)

#define mmu_get_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_get_scsi_one)(vaddr,len,sbus)
#define mmu_get_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_get_scsi_sgl)(sg,sz,sbus)
#define mmu_release_scsi_one(vaddr,len,sbus) BTFIXUP_CALL(mmu_release_scsi_one)(vaddr,len,sbus)
#define mmu_release_scsi_sgl(sg,sz,sbus) BTFIXUP_CALL(mmu_release_scsi_sgl)(sg,sz,sbus)

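/* Rough usage sketch (illustrative only, not taken from a real driver): an
 * SBus DMA user maps a kernel buffer to a bus address before starting the
 * transfer and releases it afterwards, e.g.
 *
 *	__u32 dvma = mmu_get_scsi_one(buf, len, sbus);
 *	... program the device with dvma, run the transfer ...
 *	mmu_release_scsi_one(dvma, len, sbus);
 *
 * The scatter-gather variants do the same for a struct scatterlist array.
 */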
/*
 * mmu_map/unmap are provided by iommu/iounit; they are invalid to call on IIep.
 */
BTFIXUPDEF_CALL(void,  mmu_map_dma_area, unsigned long va, __u32 addr, int len)
BTFIXUPDEF_CALL(unsigned long /*phys*/, mmu_translate_dvma, unsigned long busa)
BTFIXUPDEF_CALL(void,  mmu_unmap_dma_area, unsigned long busa, int len)

#define mmu_map_dma_area(va,ba,len) BTFIXUP_CALL(mmu_map_dma_area)(va,ba,len)
#define mmu_unmap_dma_area(ba,len) BTFIXUP_CALL(mmu_unmap_dma_area)(ba,len)
#define mmu_translate_dvma(ba)     BTFIXUP_CALL(mmu_translate_dvma)(ba)
BTFIXUPDEF_SIMM13(pmd_shift)
BTFIXUPDEF_SETHI(pmd_size)
BTFIXUPDEF_SETHI(pmd_mask)

extern unsigned int pmd_align(unsigned int addr) __attribute__((const));
extern __inline__ unsigned int pmd_align(unsigned int addr)
{
	return ((addr + ~BTFIXUP_SETHI(pmd_mask)) & BTFIXUP_SETHI(pmd_mask));
}

BTFIXUPDEF_SIMM13(pgdir_shift)
BTFIXUPDEF_SETHI(pgdir_size)
BTFIXUPDEF_SETHI(pgdir_mask)

extern unsigned int pgdir_align(unsigned int addr) __attribute__((const));
extern __inline__ unsigned int pgdir_align(unsigned int addr)
{
	return ((addr + ~BTFIXUP_SETHI(pgdir_mask)) & BTFIXUP_SETHI(pgdir_mask));
}

BTFIXUPDEF_SIMM13(ptrs_per_pte)
BTFIXUPDEF_SIMM13(ptrs_per_pmd)
BTFIXUPDEF_SIMM13(ptrs_per_pgd)
BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)

#define VMALLOC_VMADDR(x) ((unsigned long)(x))

#define pte_ERROR(e)   __builtin_trap()
#define pmd_ERROR(e)   __builtin_trap()
#define pgd_ERROR(e)   __builtin_trap()

BTFIXUPDEF_INT(page_none)
BTFIXUPDEF_INT(page_shared)
BTFIXUPDEF_INT(page_copy)
BTFIXUPDEF_INT(page_readonly)
BTFIXUPDEF_INT(page_kernel)

#define PMD_SHIFT       	BTFIXUP_SIMM13(pmd_shift)
#define PMD_SIZE        	BTFIXUP_SETHI(pmd_size)
#define PMD_MASK        	BTFIXUP_SETHI(pmd_mask)
#define PMD_ALIGN(addr) 	pmd_align(addr)
#define PGDIR_SHIFT     	BTFIXUP_SIMM13(pgdir_shift)
#define PGDIR_SIZE      	BTFIXUP_SETHI(pgdir_size)
#define PGDIR_MASK      	BTFIXUP_SETHI(pgdir_mask)
#define PGDIR_ALIGN(addr)	pgdir_align(addr)
#define PTRS_PER_PTE    	BTFIXUP_SIMM13(ptrs_per_pte)
#define PTRS_PER_PMD    	BTFIXUP_SIMM13(ptrs_per_pmd)
#define PTRS_PER_PGD    	BTFIXUP_SIMM13(ptrs_per_pgd)
#define USER_PTRS_PER_PGD	BTFIXUP_SIMM13(user_ptrs_per_pgd)
#define FIRST_USER_PGD_NR	0

#define PAGE_NONE      __pgprot(BTFIXUP_INT(page_none))
#define PAGE_SHARED    __pgprot(BTFIXUP_INT(page_shared))
#define PAGE_COPY      __pgprot(BTFIXUP_INT(page_copy))
#define PAGE_READONLY  __pgprot(BTFIXUP_INT(page_readonly))

extern unsigned long page_kernel;

#ifdef MODULE
#define PAGE_KERNEL	page_kernel
#else
#define PAGE_KERNEL    __pgprot(BTFIXUP_INT(page_kernel))
#endif
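/* Modules get PAGE_KERNEL from the page_kernel variable rather than from the
 * btfixup immediate: boot-time fixups are only applied to the kernel image
 * itself, not to code loaded later, so a module cannot rely on BTFIXUP_INT().
 */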

/* Top-level page directory */
extern pgd_t swapper_pg_dir[1024];

/* Page table for 0-4MB for everybody; on the Sparc this
 * serves the same purpose as on the i386.
 */
extern pte_t pg0[1024];
extern pte_t pg1[1024];
extern pte_t pg2[1024];
extern pte_t pg3[1024];

extern unsigned long ptr_in_current_pgd;

/* Here is a trick: since mmap.c needs the initializer elements for
 * protection_map[] to be constant at compile time, I set the following
 * to all zeros.  The real values are filled in after the appropriate MMU
 * page table routines are linked in at boot time.
 */
#define __P000  __pgprot(0)
#define __P001  __pgprot(0)
#define __P010  __pgprot(0)
#define __P011  __pgprot(0)
#define __P100  __pgprot(0)
#define __P101  __pgprot(0)
#define __P110  __pgprot(0)
#define __P111  __pgprot(0)

#define __S000	__pgprot(0)
#define __S001	__pgprot(0)
#define __S010	__pgprot(0)
#define __S011	__pgprot(0)
#define __S100	__pgprot(0)
#define __S101	__pgprot(0)
#define __S110	__pgprot(0)
#define __S111	__pgprot(0)

extern int num_contexts;

/* The first physical page can be anywhere; the following is needed so that
 * va-->pa and pa-->va conversions work properly without a performance hit
 * for all __pa()/__va() operations.
 */
extern unsigned long phys_base;

/*
 * BAD_PAGETABLE is used when we need a bogus page-table, while
 * BAD_PAGE is used for a bogus page.
 *
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern pte_t * __bad_pagetable(void);
extern pte_t __bad_page(void);
extern unsigned long empty_zero_page;

#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
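/* ZERO_PAGE resolves empty_zero_page's kernel virtual address to a physical
 * frame number (via PAGE_OFFSET and phys_base) and returns the corresponding
 * mem_map entry; the vaddr argument is unused on sparc.
 */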
#define ZERO_PAGE(vaddr) (mem_map + (((unsigned long)&empty_zero_page - PAGE_OFFSET + phys_base) >> PAGE_SHIFT))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR      (8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK          (~(sizeof(void*)-1))

#define SIZEOF_PTR_LOG2   2

BTFIXUPDEF_CALL_CONST(unsigned long, pmd_page, pmd_t)
BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page, pgd_t)

#define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd)
#define pgd_page(pgd) BTFIXUP_CALL(pgd_page)(pgd)

BTFIXUPDEF_SETHI(none_mask)
BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
BTFIXUPDEF_CALL(void, pte_clear, pte_t *)

extern __inline__ int pte_none(pte_t pte)
{
	return !(pte_val(pte) & ~BTFIXUP_SETHI(none_mask));
}

#define pte_present(pte) BTFIXUP_CALL(pte_present)(pte)
#define pte_clear(pte) BTFIXUP_CALL(pte_clear)(pte)

BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t)
BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t)
BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)

extern __inline__ int pmd_none(pmd_t pmd)
{
	return !(pmd_val(pmd) & ~BTFIXUP_SETHI(none_mask));
}

#define pmd_bad(pmd) BTFIXUP_CALL(pmd_bad)(pmd)
#define pmd_present(pmd) BTFIXUP_CALL(pmd_present)(pmd)
#define pmd_clear(pmd) BTFIXUP_CALL(pmd_clear)(pmd)

BTFIXUPDEF_CALL_CONST(int, pgd_none, pgd_t)
BTFIXUPDEF_CALL_CONST(int, pgd_bad, pgd_t)
BTFIXUPDEF_CALL_CONST(int, pgd_present, pgd_t)
BTFIXUPDEF_CALL(void, pgd_clear, pgd_t *)

#define pgd_none(pgd) BTFIXUP_CALL(pgd_none)(pgd)
#define pgd_bad(pgd) BTFIXUP_CALL(pgd_bad)(pgd)
#define pgd_present(pgd) BTFIXUP_CALL(pgd_present)(pgd)
#define pgd_clear(pgd) BTFIXUP_CALL(pgd_clear)(pgd)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
BTFIXUPDEF_HALF(pte_writei)
BTFIXUPDEF_HALF(pte_dirtyi)
BTFIXUPDEF_HALF(pte_youngi)

extern int pte_write(pte_t pte) __attribute__((const));
extern __inline__ int pte_write(pte_t pte)
{
	return pte_val(pte) & BTFIXUP_HALF(pte_writei);
}

extern int pte_dirty(pte_t pte) __attribute__((const));
extern __inline__ int pte_dirty(pte_t pte)
{
	return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi);
}

extern int pte_young(pte_t pte) __attribute__((const));
extern __inline__ int pte_young(pte_t pte)
{
	return pte_val(pte) & BTFIXUP_HALF(pte_youngi);
}

BTFIXUPDEF_HALF(pte_wrprotecti)
BTFIXUPDEF_HALF(pte_mkcleani)
BTFIXUPDEF_HALF(pte_mkoldi)

extern pte_t pte_wrprotect(pte_t pte) __attribute__((const));
extern __inline__ pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti));
}

extern pte_t pte_mkclean(pte_t pte) __attribute__((const));
extern __inline__ pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani));
}

extern pte_t pte_mkold(pte_t pte) __attribute__((const));
extern __inline__ pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi));
}

BTFIXUPDEF_CALL_CONST(pte_t, pte_mkwrite, pte_t)
BTFIXUPDEF_CALL_CONST(pte_t, pte_mkdirty, pte_t)
BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)

#define pte_mkwrite(pte) BTFIXUP_CALL(pte_mkwrite)(pte)
#define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte)
#define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte)

#define page_pte_prot(page, prot)	mk_pte(page, prot)
#define page_pte(page)			page_pte_prot(page, __pgprot(0))

BTFIXUPDEF_CALL(struct page *, pte_page, pte_t)
#define pte_page(pte) BTFIXUP_CALL(pte_page)(pte)

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
BTFIXUPDEF_CALL_CONST(pte_t, mk_pte, struct page *, pgprot_t)

BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_phys, unsigned long, pgprot_t)
BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)

#define mk_pte(page,pgprot) BTFIXUP_CALL(mk_pte)(page,pgprot)
#define mk_pte_phys(page,pgprot) BTFIXUP_CALL(mk_pte_phys)(page,pgprot)
#define mk_pte_io(page,pgprot,space) BTFIXUP_CALL(mk_pte_io)(page,pgprot,space)
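/* Typical construction sequence (illustrative sketch mirroring generic
 * 2.4-era mm code, not something defined in this file):
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *	if (write_access)
 *		pte = pte_mkdirty(pte_mkwrite(pte));
 *	set_pte(ptep, pte);
 */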

BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
BTFIXUPDEF_CALL(void, pmd_set, pmd_t *, pte_t *)

#define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
#define pmd_set(pmdp,ptep) BTFIXUP_CALL(pmd_set)(pmdp,ptep)

BTFIXUPDEF_INT(pte_modify_mask)

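/* pte_modify() keeps the bits selected by pte_modify_mask (the page frame
 * number plus any MMU-specific bits) and replaces the protection bits with
 * those from newprot.
 */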
extern pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute__((const));
extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) |
		pgprot_val(newprot));
}

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
BTFIXUPDEF_CALL(pte_t *, pte_offset, pmd_t *, unsigned long)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) BTFIXUP_CALL(pmd_offset)(dir,addr)

/* Find an entry in the third-level page table.. */
#define pte_offset(dir,addr) BTFIXUP_CALL(pte_offset)(dir,addr)
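/* A full software walk is therefore the usual three-level descent
 * (sketch, assuming the caller holds the appropriate locks for the mm):
 *
 *	pgd_t *pgd = pgd_offset(mm, address);
 *	pmd_t *pmd = pmd_offset(pgd, address);
 *	pte_t *pte = pte_offset(pmd, address);
 *
 * with pgd_none()/pmd_none() checks between the steps for anything that
 * might not be mapped yet.
 */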

/* The permissions for pgprot_val to make a page mapped on the obio space */
extern unsigned int pg_iobits;

#define flush_icache_page(vma, pg)      do { } while(0)
#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */

BTFIXUPDEF_CALL(void, set_pte, pte_t *, pte_t)

#define set_pte(ptep,pteval) BTFIXUP_CALL(set_pte)(ptep,pteval)

struct seq_file;
BTFIXUPDEF_CALL(void, mmu_info, struct seq_file *)

#define mmu_info(p) BTFIXUP_CALL(mmu_info)(p)

/* Fault handler stuff... */
#define FAULT_CODE_PROT     0x1
#define FAULT_CODE_WRITE    0x2
#define FAULT_CODE_USER     0x4

BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t)

#define update_mmu_cache(vma,addr,pte) BTFIXUP_CALL(update_mmu_cache)(vma,addr,pte)
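/* update_mmu_cache() is called by the generic mm code after a pte has been
 * installed; the per-MMU implementation can use it to preload the TLB or
 * deal with cache aliasing for the new mapping.
 */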

extern int invalid_segment;

/* Encode and de-code a swap entry */
#define SWP_TYPE(x)			(((x).val >> 2) & 0x7f)
#define SWP_OFFSET(x)			(((x).val >> 9) & 0x3ffff)
#define SWP_ENTRY(type,offset)		((swp_entry_t) { (((type) & 0x7f) << 2) | (((offset) & 0x3ffff) << 9) })
#define pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(x)		((pte_t) { (x).val })
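/* Layout of a swap entry in a non-present pte: the swap type lives in bits
 * 8:2 and the swap offset in bits 26:9.  The low-order bits are deliberately
 * left clear, presumably so the encoded value can never be mistaken for a
 * present/valid hardware pte by either MMU flavour.
 */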

struct ctx_list {
	struct ctx_list *next;
	struct ctx_list *prev;
	unsigned int ctx_number;
	struct mm_struct *ctx_mm;
};

extern struct ctx_list *ctx_list_pool;  /* Dynamically allocated */
extern struct ctx_list ctx_free;        /* Head of free list */
extern struct ctx_list ctx_used;        /* Head of used contexts list */

#define NO_CONTEXT     -1

extern __inline__ void remove_from_ctx_list(struct ctx_list *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
}

extern __inline__ void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
	entry->next = head;
	(entry->prev = head->prev)->next = entry;
	head->prev = entry;
}
#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
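/* ctx_free and ctx_used are the heads of circular doubly-linked lists of
 * MMU contexts; add_to_ctx_list() inserts an entry just before the head,
 * i.e. at the tail of the list.
 */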

extern __inline__ unsigned long
__get_phys (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4:
	case sun4c:
		return sun4c_get_pte (addr) << PAGE_SHIFT;
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

extern __inline__ int
__get_iospace (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4:
	case sun4c:
		return -1; /* Don't check iospace on sun4c */
	case sun4m:
	case sun4d:
		return (srmmu_get_pte (addr) >> 28);
	default:
		return -1;
	}
}
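/* Both helpers above probe the hardware pte for a virtual address directly.
 * On SRMMU the physical page number sits in pte bits 31:8 (the physical
 * address space is 36 bits wide), hence the mask-and-shift in __get_phys(),
 * while the top nibble returned by __get_iospace() identifies which physical
 * address space the mapping points into.
 */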

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
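/* sparc_valid_addr_bitmap has one bit per megabyte of physical address space
 * (hence the >> 20), so kern_addr_valid() answers at 1MB granularity rather
 * than per page.
 */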

extern int io_remap_page_range(unsigned long from, unsigned long to,
			       unsigned long size, pgprot_t prot, int space);

#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* !(_SPARC_PGTABLE_H) */