/*
 * PowerPC memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _PPC64_MMU_H_
#define _PPC64_MMU_H_

#ifndef __ASSEMBLY__

/* Default "unsigned long" context */
typedef unsigned long mm_context_t;

/*
 * Define the size of the cache used for segment table entries.  The first
 * entry is used as a cache pointer, therefore the actual number of entries
 * stored is one less than defined here.  Do not change this value without
 * considering the impact it will have on the layout of the paca in paca.h.
 */
#define STAB_CACHE_SIZE 16

/*
 * Hardware Segment Lookaside Buffer Entry
 * This structure has been padded out to two 64b doublewords (actual SLBEs are
 * 94 bits).  This padding facilitates use by the segment management
 * instructions.
 */
typedef struct {
	unsigned long esid: 36; /* Effective segment ID */
	unsigned long resv0:20; /* Reserved */
	unsigned long v:     1; /* Entry valid (v=1) or invalid */
	unsigned long resv1: 1; /* Reserved */
	unsigned long ks:    1; /* Supervisor (privileged) state storage key */
	unsigned long kp:    1; /* Problem state storage key */
	unsigned long n:     1; /* No-execute if n=1 */
	unsigned long resv2: 3; /* Padding to a 64b boundary */
} ste_dword0;

typedef struct {
	unsigned long vsid: 52; /* Virtual segment ID */
	unsigned long resv0:12; /* Padding to a 64b boundary */
} ste_dword1;

typedef struct _STE {
	union {
		unsigned long dword0;
		ste_dword0    dw0;
	} dw0;

	union {
		unsigned long dword1;
		ste_dword1    dw1;
	} dw1;
} STE;

typedef struct {
	unsigned long esid: 36; /* Effective segment ID */
	unsigned long v:     1; /* Entry valid (v=1) or invalid */
	unsigned long null1:15; /* Padding to a 64b boundary */
	unsigned long index:12; /* Index to select SLB entry. Used by slbmte */
} slb_dword0;

typedef struct {
	unsigned long vsid: 52; /* Virtual segment ID */
	unsigned long ks:    1; /* Supervisor (privileged) state storage key */
	unsigned long kp:    1; /* Problem state storage key */
	unsigned long n:     1; /* No-execute if n=1 */
	unsigned long l:     1; /* Virt pages are large (l=1) or 4KB (l=0) */
	unsigned long c:     1; /* Class */
	unsigned long resv0: 7; /* Padding to a 64b boundary */
} slb_dword1;

typedef struct _SLBE {
	union {
		unsigned long dword0;
		slb_dword0    dw0;
	} dw0;

	union {
		unsigned long dword1;
		slb_dword1    dw1;
	} dw1;
} SLBE;
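/*
 * Illustrative sketch only (example_make_slb_entry is a hypothetical name,
 * not part of this interface): the unions above let software build an entry
 * field by field and then hand the two packed doublewords to the hardware
 * as plain 64-bit values.  The slbmte operand order below assumes RS carries
 * the VSID doubleword and RB the ESID doubleword, with the usual Ks=0/Kp=1
 * convention noted for the protection bits further down.
 */
static inline void example_make_slb_entry(unsigned long esid,
					  unsigned long vsid,
					  unsigned long index)
{
	SLBE slbe;

	slbe.dw0.dword0 = 0;
	slbe.dw1.dword1 = 0;

	slbe.dw0.dw0.esid  = esid;	/* effective segment to map */
	slbe.dw0.dw0.v     = 1;		/* entry is valid */
	slbe.dw0.dw0.index = index;	/* SLB slot to load */
	slbe.dw1.dw1.vsid  = vsid;	/* virtual segment it maps to */
	slbe.dw1.dw1.kp    = 1;		/* problem state storage key */

	asm volatile("slbmte %0,%1"
		     : : "r" (slbe.dw1.dword1), "r" (slbe.dw0.dword0)
		     : "memory");
}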

/*
 * This structure is used in paca.h where the layout depends on the
 * size being 24B.
 */
typedef struct {
	unsigned long	real;
	unsigned long	virt;
	unsigned long	next_round_robin;
} STAB;

/* Hardware Page Table Entry */

#define HPTES_PER_GROUP 8

typedef struct {
	unsigned long avpn:57; /* vsid | api == avpn  */
	unsigned long :     2; /* Software use */
	unsigned long bolted: 1; /* HPTE is "bolted" */
	unsigned long lock: 1; /* lock on pSeries SMP */
	unsigned long l:    1; /* Virtual page is large (L=1) or 4 KB (L=0) */
	unsigned long h:    1; /* Hash function identifier */
	unsigned long v:    1; /* Valid (v=1) or invalid (v=0) */
} Hpte_dword0;

typedef struct {
	unsigned long :     6; /* unused - padding */
	unsigned long ac:   1; /* Address compare */
	unsigned long r:    1; /* Referenced */
	unsigned long c:    1; /* Changed */
	unsigned long w:    1; /* Write-thru cache mode */
	unsigned long i:    1; /* Cache inhibited */
	unsigned long m:    1; /* Memory coherence required */
	unsigned long g:    1; /* Guarded */
	unsigned long n:    1; /* No-execute */
	unsigned long pp:   2; /* Page protection bits 1:2 */
} Hpte_flags;

typedef struct {
	unsigned long pp0:  1; /* Page protection bit 0 */
	unsigned long ts:   1; /* Tag set bit */
	unsigned long rpn: 50; /* Real page number */
	unsigned long :     2; /* Reserved */
	unsigned long ac:   1; /* Address compare */
	unsigned long r:    1; /* Referenced */
	unsigned long c:    1; /* Changed */
	unsigned long w:    1; /* Write-thru cache mode */
	unsigned long i:    1; /* Cache inhibited */
	unsigned long m:    1; /* Memory coherence required */
	unsigned long g:    1; /* Guarded */
	unsigned long n:    1; /* No-execute */
	unsigned long pp:   2; /* Page protection bits 1:2 */
} Hpte_dword1;

typedef struct {
	char padding[6];	 /* padding */
	unsigned long :       6; /* padding */
	unsigned long flags: 10; /* HPTE flags */
} Hpte_dword1_flags;

typedef struct _HPTE {
	union {
		unsigned long dword0;
		Hpte_dword0   dw0;
	} dw0;

	union {
		unsigned long dword1;
		Hpte_dword1   dw1;
		Hpte_dword1_flags flags;
	} dw1;
} HPTE;

/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for linux     */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */
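/*
 * Illustrative sketch only (example_kernel_hpte_flags is a hypothetical
 * helper, not part of this interface): with Ks=0 and Kp=1 as assumed above,
 * PP_RWXX gives the kernel read/write access while user mode gets none.
 * A cacheable, coherent, kernel-only mapping would pack its second
 * doubleword flags roughly like this.
 */
static inline unsigned long example_kernel_hpte_flags(void)
{
	HPTE hpte;

	hpte.dw1.dword1 = 0;
	hpte.dw1.dw1.m  = 1;		/* memory coherence required */
	hpte.dw1.dw1.pp = PP_RWXX;	/* supervisor read/write, no user access */

	return hpte.dw1.dword1;
}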

typedef struct {
	HPTE *		htab;
	unsigned long	htab_num_ptegs;
	unsigned long	htab_hash_mask;
	unsigned long	next_round_robin;
	unsigned long	last_kernel_address;
	unsigned long	htab_lock_shift;
} HTAB;

extern HTAB htab_data;

#include <linux/cache.h>
#include <linux/spinlock.h>
typedef struct {
	spinlock_t lock;
} ____cacheline_aligned hash_table_lock_t;

void invalidate_hpte( unsigned long slot );
long select_hpte_slot( unsigned long vpn );
void create_valid_hpte( unsigned long slot, unsigned long vpn,
			unsigned long prpn, unsigned hash,
			void * ptep, unsigned hpteflags,
			unsigned bolted );
unsigned long get_lock_slot(unsigned long vpn);

#define PD_SHIFT (10+12)		/* Page directory */
#define PD_MASK  0x02FF
#define PT_SHIFT (12)			/* Page Table */
#define PT_MASK  0x02FF

#define LARGE_PAGE_SHIFT 24

static inline unsigned long hpt_hash(unsigned long vpn, int large)
{
	unsigned long vsid;
	unsigned long page;

	if (large) {
		vsid = vpn >> 4;
		page = vpn & 0xf;
	} else {
		vsid = vpn >> 16;
		page = vpn & 0xffff;
	}

	return (vsid & 0x7fffffffff) ^ page;
}
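/*
 * Illustrative sketch only (example_primary_group_slot is a hypothetical
 * helper): callers typically reduce the hash to the first slot of a primary
 * PTE group by masking with the htab_hash_mask chosen at boot and scaling
 * by the group size.
 */
static inline unsigned long example_primary_group_slot(unsigned long vpn,
						       int large)
{
	unsigned long hash = hpt_hash(vpn, large);

	return (hash & htab_data.htab_hash_mask) * HPTES_PER_GROUP;
}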

#define PG_SHIFT (12)			/* Page Entry */

/*
 * Invalidate a TLB entry.  Assumes a context synchronizing
 * instruction preceded this call (for example taking the
 * TLB lock).
 */
static inline void _tlbie(unsigned long va, int large)
{
	asm volatile("ptesync": : :"memory");

	if (large) {
		asm volatile("clrldi	%0,%0,16\n\
			      tlbie	%0,1" : : "r"(va) : "memory");
	} else {
		asm volatile("clrldi	%0,%0,16\n\
			      tlbie	%0,0" : : "r"(va) : "memory");
	}
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
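/*
 * Illustrative usage sketch (example_flush_one_page is a hypothetical
 * helper): flushing the translation for a single 4KB page.  Real callers
 * execute a context synchronizing instruction first (e.g. by taking the
 * relevant lock), as the comment above _tlbie() requires.
 */
static inline void example_flush_one_page(unsigned long va)
{
	_tlbie(va, 0);	/* second argument: 0 = 4KB page, 1 = large page */
}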

#endif /* __ASSEMBLY__ */

/*
 * Location of cpu0's segment table
 */
#define STAB0_PAGE	0x9
#define STAB0_PHYS_ADDR	(STAB0_PAGE<<PAGE_SHIFT)
#define STAB0_VIRT_ADDR	(KERNELBASE+STAB0_PHYS_ADDR)
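/*
 * With the usual 4KB pages (PAGE_SHIFT == 12) this works out to physical
 * address 0x9000, mapped virtually at KERNELBASE + 0x9000.
 */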

/* Block size masks */
#define BL_128K	0x000
#define BL_256K 0x001
#define BL_512K 0x003
#define BL_1M   0x007
#define BL_2M   0x00F
#define BL_4M   0x01F
#define BL_8M   0x03F
#define BL_16M  0x07F
#define BL_32M  0x0FF
#define BL_64M  0x1FF
#define BL_128M 0x3FF
#define BL_256M 0x7FF
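/* Each mask above works out to (block size / 128K) - 1. */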

/* Used to set up SDR1 register */
#define HASH_TABLE_SIZE_64K	0x00010000
#define HASH_TABLE_SIZE_128K	0x00020000
#define HASH_TABLE_SIZE_256K	0x00040000
#define HASH_TABLE_SIZE_512K	0x00080000
#define HASH_TABLE_SIZE_1M	0x00100000
#define HASH_TABLE_SIZE_2M	0x00200000
#define HASH_TABLE_SIZE_4M	0x00400000
#define HASH_TABLE_MASK_64K	0x000
#define HASH_TABLE_MASK_128K	0x001
#define HASH_TABLE_MASK_256K	0x003
#define HASH_TABLE_MASK_512K	0x007
#define HASH_TABLE_MASK_1M	0x00F
#define HASH_TABLE_MASK_2M	0x01F
#define HASH_TABLE_MASK_4M	0x03F
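/*
 * The size/mask pairs follow a simple pattern: each mask is
 * (table size / 64K) - 1, gaining one bit per doubling of the table.
 */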

/* These are the Ks and Kp from the PowerPC books.  For proper operation,
 * Ks = 0, Kp = 1.
 */
#define MI_AP		786
#define MI_Ks		0x80000000	/* Should not be set */
#define MI_Kp		0x40000000	/* Should always be set */

/* The effective page number register.  When read, contains the information
 * about the last instruction TLB miss.  When MI_RPN is written, bits in
 * this register are used to create the TLB entry.
 */
#define MI_EPN		787
#define MI_EPNMASK	0xfffff000	/* Effective page number for entry */
#define MI_EVALID	0x00000200	/* Entry is valid */
#define MI_ASIDMASK	0x0000000f	/* ASID match value */
					/* Reset value is undefined */

/* A "level 1" or "segment" or whatever you want to call it register.
 * For the instruction TLB, it contains bits that get loaded into the
 * TLB entry when the MI_RPN is written.
 */
#define MI_TWC		789
#define MI_APG		0x000001e0	/* Access protection group (0) */
#define MI_GUARDED	0x00000010	/* Guarded storage */
#define MI_PSMASK	0x0000000c	/* Mask of page size bits */
#define MI_PS8MEG	0x0000000c	/* 8M page size */
#define MI_PS512K	0x00000004	/* 512K page size */
#define MI_PS4K_16K	0x00000000	/* 4K or 16K page size */
#define MI_SVALID	0x00000001	/* Segment entry is valid */
					/* Reset value is undefined */

/* Real page number.  Defined by the pte.  Writing this register
 * causes a TLB entry to be created for the instruction TLB, using
 * additional information from the MI_EPN and MI_TWC registers.
 */
#define MI_RPN		790

/* Define an RPN value for mapping kernel memory to large virtual
 * pages for boot initialization.  This has a real page number of 0,
 * large page size, shared page, cache enabled, and valid.
 * Also mark all subpages valid and writable.
 */
#define MI_BOOTINIT	0x000001fd

#define MD_CTR		792	/* Data TLB control register */
#define MD_GPM		0x80000000	/* Set domain manager mode */
#define MD_PPM		0x40000000	/* Set subpage protection */
#define MD_CIDEF	0x20000000	/* Set cache inhibit when MMU dis */
#define MD_WTDEF	0x10000000	/* Set writethrough when MMU dis */
#define MD_RSV4I	0x08000000	/* Reserve 4 TLB entries */
#define MD_TWAM		0x04000000	/* Use 4K page hardware assist */
#define MD_PPCS		0x02000000	/* Use MI_RPN prob/priv state */
#define MD_IDXMASK	0x00001f00	/* TLB index to be loaded */
#define MD_RESETVAL	0x04000000	/* Value of register at reset */

#define M_CASID		793	/* Address space ID (context) to match */
#define MC_ASIDMASK	0x0000000f	/* Bits used for ASID value */


/* These are the Ks and Kp from the PowerPC books.  For proper operation,
 * Ks = 0, Kp = 1.
 */
#define MD_AP		794
#define MD_Ks		0x80000000	/* Should not be set */
#define MD_Kp		0x40000000	/* Should always be set */

/* The effective page number register.  When read, contains the information
 * about the last data TLB miss.  When MD_RPN is written, bits in
 * this register are used to create the TLB entry.
 */
#define MD_EPN		795
#define MD_EPNMASK	0xfffff000	/* Effective page number for entry */
#define MD_EVALID	0x00000200	/* Entry is valid */
#define MD_ASIDMASK	0x0000000f	/* ASID match value */
					/* Reset value is undefined */

/* The pointer to the base address of the first level page table.
 * During a software tablewalk, reading this register provides the address
 * of the entry associated with MD_EPN.
 */
#define M_TWB		796
#define M_L1TB		0xfffff000	/* Level 1 table base address */
#define M_L1INDX	0x00000ffc	/* Level 1 index, when read */
					/* Reset value is undefined */

/* A "level 1" or "segment" or whatever you want to call it register.
 * For the data TLB, it contains bits that get loaded into the TLB entry
 * when the MD_RPN is written.  It also provides the hardware assist
 * for finding the PTE address during a software tablewalk.
 */
#define MD_TWC		797
#define MD_L2TB		0xfffff000	/* Level 2 table base address */
#define MD_L2INDX	0xfffffe00	/* Level 2 index (*pte), when read */
#define MD_APG		0x000001e0	/* Access protection group (0) */
#define MD_GUARDED	0x00000010	/* Guarded storage */
#define MD_PSMASK	0x0000000c	/* Mask of page size bits */
#define MD_PS8MEG	0x0000000c	/* 8M page size */
#define MD_PS512K	0x00000004	/* 512K page size */
#define MD_PS4K_16K	0x00000000	/* 4K or 16K page size */
#define MD_WT		0x00000002	/* Use writethrough page attribute */
#define MD_SVALID	0x00000001	/* Segment entry is valid */
					/* Reset value is undefined */


/* Real page number.  Defined by the pte.  Writing this register
 * causes a TLB entry to be created for the data TLB, using
 * additional information from the MD_EPN and MD_TWC registers.
 */
#define MD_RPN		798

/* This is a temporary storage register that could be used to save
 * a processor working register during a tablewalk.
 */
#define M_TW		799

#endif /* _PPC64_MMU_H_ */