#ifndef _PPC_PAGE_H
#define _PPC_PAGE_H

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
/*
 * Subtle: this is an int (not an unsigned long) and so it
 * gets extended to 64 bits the way we want (i.e. with 1s).  -- paulus
 */
#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))

#ifdef __KERNEL__
#include <linux/config.h>

/* Kernel virtual base address; the value comes from the kernel configuration. */
#define PAGE_OFFSET	CONFIG_KERNEL_START
#define KERNELBASE	PAGE_OFFSET
18 
#ifndef __ASSEMBLY__
/*
 * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
 * physical addressing.  For now this is just the IBM PPC440.
 */
#ifdef CONFIG_PTE_64BIT
typedef unsigned long long pte_basic_t;
#define PTE_SHIFT	(PAGE_SHIFT - 3)	/* 512 ptes per page */
#define PTE_FMT		"%16Lx"
#else
typedef unsigned long pte_basic_t;
#define PTE_SHIFT	(PAGE_SHIFT - 2)	/* 1024 ptes per page */
#define PTE_FMT		"%.8lx"
#endif

#include <asm/system.h> /* for xmon definition */
35 
#ifdef CONFIG_XMON
/* With the xmon debugger built in, BUG() reports and drops into the monitor. */
#define BUG() do { \
	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
	xmon(0); \
} while (0)
#else
/* Without xmon: report, then execute an all-zero word to force a trap. */
#define BUG() do { \
	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
	__asm__ __volatile__(".long 0x0"); \
} while (0)
#endif
#define PAGE_BUG(page) do { BUG(); } while (0)
48 
#define STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking..
 * Wrapping each page-table value in a one-member struct makes
 * accidentally mixing pte/pmd/pgd/pgprot values a compile error.
 */
typedef struct { pte_basic_t pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

#else
/*
 * .. while these make it easier on the compiler
 * (plain integers: no type-checking, but no struct wrapping either)
 */
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;

#define pte_val(x)	(x)
#define pmd_val(x)	(x)
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

#define __pte(x)	(x)
#define __pmd(x)	(x)
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif
90 
91 
/* align addr on a size boundary - adjust address up if needed; the mask
 * arithmetic only works when size is a power of two -- Cort */
#define _ALIGN(addr,size)	(((addr)+(size)-1)&(~((size)-1)))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
97 
/* Page copy/clear primitives, implemented in arch assembly/C elsewhere. */
extern void clear_page(void *page);
extern void copy_page(void *to, void *from);
extern void clear_user_page(void *page, unsigned long vaddr);
extern void copy_user_page(void *to, void *from, unsigned long vaddr);

extern unsigned long ppc_memstart;
extern unsigned long ppc_memoffset;
#ifndef CONFIG_APUS
/* Normal kernels: RAM starts at physical 0 and is mapped at PAGE_OFFSET. */
#define PPC_MEMSTART	0
#define PPC_MEMOFFSET	PAGE_OFFSET
#else
/* CONFIG_APUS: start/offset are runtime variables filled in elsewhere. */
#define PPC_MEMSTART	ppc_memstart
#define PPC_MEMOFFSET	ppc_memoffset
#endif
112 
#if defined(CONFIG_APUS) && !defined(MODULE)
/* map phys->virtual and virtual->phys for RAM pages */
static inline unsigned long ___pa(unsigned long v)
{
	unsigned long p;
	/*
	 * addis adds ((-PAGE_OFFSET) >> 16) to the upper half of v.
	 * The label 1: address is recorded in the .vtop_fixup section;
	 * NOTE(review): presumably so startup code can patch the addis
	 * immediate once the real memory layout is known -- confirm
	 * against the APUS boot/fixup code.
	 */
	asm volatile ("1: addis %0, %1, %2;"
		      ".section \".vtop_fixup\",\"aw\";"
		      ".align  1;"
		      ".long   1b;"
		      ".previous;"
		      : "=r" (p)
		      : "b" (v), "K" (((-PAGE_OFFSET) >> 16) & 0xffff));

	return p;
}
/* Inverse of ___pa(): adds PAGE_OFFSET's upper half, with a .ptov_fixup entry. */
static inline void* ___va(unsigned long p)
{
	unsigned long v;
	asm volatile ("1: addis %0, %1, %2;"
		      ".section \".ptov_fixup\",\"aw\";"
		      ".align  1;"
		      ".long   1b;"
		      ".previous;"
		      : "=r" (v)
		      : "b" (p), "K" (((PAGE_OFFSET) >> 16) & 0xffff));

	return (void*) v;
}
#else
/* Everyone else: physical and virtual differ by a constant linear offset. */
#define ___pa(vaddr) ((vaddr)-PPC_MEMOFFSET)
#define ___va(paddr) ((paddr)+PPC_MEMOFFSET)
#endif

#define __pa(x) ___pa((unsigned long)(x))
#define __va(x) ((void *)(___va((unsigned long)(x))))
148 
#define MAP_PAGE_RESERVED	(1<<15)
/* Index mem_map[] by the page frame number of kaddr relative to PAGE_OFFSET. */
#define virt_to_page(kaddr)	(mem_map + (((unsigned long)(kaddr)-PAGE_OFFSET) >> PAGE_SHIFT))
#define VALID_PAGE(page)	(((page) - mem_map) < max_mapnr)

extern unsigned long get_zero_page_fast(void);
154 
155 /* Pure 2^n version of get_order */
get_order(unsigned long size)156 extern __inline__ int get_order(unsigned long size)
157 {
158 	int order;
159 
160 	size = (size-1) >> (PAGE_SHIFT-1);
161 	order = -1;
162 	do {
163 		size >>= 1;
164 		order++;
165 	} while (size);
166 	return order;
167 }
168 
#endif /* __ASSEMBLY__ */

/* Default vm_flags for data mappings: read/write/exec allowed and permitted. */
#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#endif /* __KERNEL__ */
#endif /* _PPC_PAGE_H */
176