/* SPDX-License-Identifier: GPL-2.0 */
/*
 * pgtable.h: SpitFire page table operations.
 *
 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#ifndef _SPARC64_PGTABLE_H
#define _SPARC64_PGTABLE_H

/* This file contains the functions and defines necessary to modify and use
 * the SpitFire page tables.
 */

#include <asm-generic/pgtable-nop4d.h>
#include <linux/compiler.h>
#include <linux/const.h>
#include <asm/types.h>
#include <asm/spitfire.h>
#include <asm/asi.h>
#include <asm/adi.h>
#include <asm/page.h>
#include <asm/processor.h>

/* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
 * The page copy blockops can use 0x6000000 to 0x8000000.
 * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
 * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
 * The vmalloc area spans 0x100000000 to 0x200000000.
 * Since modules need to be in the lowest 32-bits of the address space,
 * we place them right before the OBP area from 0x10000000 to 0xf0000000.
 * There is a single static kernel PMD which maps from 0x0 to address
 * 0x400000000.
 */
#define	TLBTEMP_BASE		_AC(0x0000000006000000,UL)
#define	TSBMAP_8K_BASE		_AC(0x0000000008000000,UL)
#define	TSBMAP_4M_BASE		_AC(0x0000000008400000,UL)
#define MODULES_VADDR		_AC(0x0000000010000000,UL)
#define MODULES_LEN		_AC(0x00000000e0000000,UL)
#define MODULES_END		_AC(0x00000000f0000000,UL)
#define LOW_OBP_ADDRESS		_AC(0x00000000f0000000,UL)
#define HI_OBP_ADDRESS		_AC(0x0000000100000000,UL)
#define VMALLOC_START		_AC(0x0000000100000000,UL)
#define VMEMMAP_BASE		VMALLOC_END
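
/* Quick arithmetic check of the layout described above (illustrative):
 * MODULES_END - MODULES_VADDR == 0xe0000000 == MODULES_LEN (~3.5GB), and
 * the module area ends exactly where the OBP range begins
 * (LOW_OBP_ADDRESS).  Note that VMALLOC_END is a variable (declared
 * below and sized during early boot), so VMEMMAP_BASE is not a
 * compile-time constant; vmemmap simply starts wherever the vmalloc
 * area ends.
 */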

/* PMD_SHIFT determines the size of the area a second-level page
 * table can map
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PMD_BITS	(PAGE_SHIFT - 3)

/* PUD_SHIFT determines the size of the area a third-level page
 * table can map
 */
#define PUD_SHIFT	(PMD_SHIFT + PMD_BITS)
#define PUD_SIZE	(_AC(1,UL) << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PUD_BITS	(PAGE_SHIFT - 3)

/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
#define PGDIR_SHIFT	(PUD_SHIFT + PUD_BITS)
#define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PGDIR_BITS	(PAGE_SHIFT - 3)

#if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS)
#error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support
#endif

#if (PGDIR_SHIFT + PGDIR_BITS) != 53
#error Page table parameters do not cover virtual address space properly.
#endif

#if (PMD_SHIFT != HPAGE_SHIFT)
#error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
#endif
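
/* Worked example (illustrative): with the default 8K kernel pages,
 * PAGE_SHIFT == 13, so PMD_BITS == PUD_BITS == PGDIR_BITS == 10 and:
 *
 *	PMD_SHIFT   == 23  ->  PMD_SIZE   == 8MB
 *	PUD_SHIFT   == 33  ->  PUD_SIZE   == 8GB
 *	PGDIR_SHIFT == 43  ->  PGDIR_SIZE == 8TB
 *
 * giving PGDIR_SHIFT + PGDIR_BITS == 53, exactly what the build-time
 * check above demands.
 */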

#ifndef __ASSEMBLY__

extern unsigned long VMALLOC_END;

#define vmemmap			((struct page *)VMEMMAP_BASE)

#include <linux/sched.h>

bool kern_addr_valid(unsigned long addr);

/* Entries per page directory level. */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << PMD_BITS)
#define PTRS_PER_PUD	(1UL << PUD_BITS)
#define PTRS_PER_PGD	(1UL << PGDIR_BITS)
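
/* With 8K pages each of the counts above is 1024, and 1024 eight-byte
 * entries fill exactly one page, i.e. every table level is a single 8K
 * page (illustrative arithmetic, not an extra constraint).
 */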

#define pmd_ERROR(e)							\
	pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
#define pud_ERROR(e)							\
	pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
#define pgd_ERROR(e)							\
	pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))

#endif /* !(__ASSEMBLY__) */

/* PTE bits which are the same in SUN4U and SUN4V format.  */
#define _PAGE_VALID	  _AC(0x8000000000000000,UL) /* Valid TTE            */
#define _PAGE_R	  	  _AC(0x8000000000000000,UL) /* Keep ref bit uptodate*/
#define _PAGE_SPECIAL     _AC(0x0200000000000000,UL) /* Special page         */
#define _PAGE_PMD_HUGE    _AC(0x0100000000000000,UL) /* Huge page            */
#define _PAGE_PUD_HUGE    _PAGE_PMD_HUGE

/* SUN4U pte bits... */
#define _PAGE_SZ4MB_4U	  _AC(0x6000000000000000,UL) /* 4MB Page             */
#define _PAGE_SZ512K_4U	  _AC(0x4000000000000000,UL) /* 512K Page            */
#define _PAGE_SZ64K_4U	  _AC(0x2000000000000000,UL) /* 64K Page             */
#define _PAGE_SZ8K_4U	  _AC(0x0000000000000000,UL) /* 8K Page              */
#define _PAGE_NFO_4U	  _AC(0x1000000000000000,UL) /* No Fault Only        */
#define _PAGE_IE_4U	  _AC(0x0800000000000000,UL) /* Invert Endianness    */
#define _PAGE_SOFT2_4U	  _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
#define _PAGE_SPECIAL_4U  _AC(0x0200000000000000,UL) /* Special page         */
#define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page            */
#define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved             */
#define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page  */
#define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
#define _PAGE_SZALL_4U	  _AC(0x6001000000000000,UL) /* All pgsz bits        */
#define _PAGE_SN_4U	  _AC(0x0000800000000000,UL) /* (Cheetah) Snoop      */
#define _PAGE_RES2_4U	  _AC(0x0000780000000000,UL) /* Reserved             */
#define _PAGE_PADDR_4U	  _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13]  */
#define _PAGE_SOFT_4U	  _AC(0x0000000000001F80,UL) /* Software bits:       */
#define _PAGE_EXEC_4U	  _AC(0x0000000000001000,UL) /* Executable SW bit    */
#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty)     */
#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd)     */
#define _PAGE_READ_4U	  _AC(0x0000000000000200,UL) /* Readable SW Bit      */
#define _PAGE_WRITE_4U	  _AC(0x0000000000000100,UL) /* Writable SW Bit      */
#define _PAGE_PRESENT_4U  _AC(0x0000000000000080,UL) /* Present              */
#define _PAGE_L_4U	  _AC(0x0000000000000040,UL) /* Locked TTE           */
#define _PAGE_CP_4U	  _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
#define _PAGE_CV_4U	  _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
#define _PAGE_E_4U	  _AC(0x0000000000000008,UL) /* side-Effect          */
#define _PAGE_P_4U	  _AC(0x0000000000000004,UL) /* Privileged Page      */
#define _PAGE_W_4U	  _AC(0x0000000000000002,UL) /* Writable             */

/* SUN4V pte bits... */
#define _PAGE_NFO_4V	  _AC(0x4000000000000000,UL) /* No Fault Only        */
#define _PAGE_SOFT2_4V	  _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty)     */
#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd)     */
#define _PAGE_READ_4V	  _AC(0x0800000000000000,UL) /* Readable SW Bit      */
#define _PAGE_WRITE_4V	  _AC(0x0400000000000000,UL) /* Writable SW Bit      */
#define _PAGE_SPECIAL_4V  _AC(0x0200000000000000,UL) /* Special page         */
#define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL) /* Huge page            */
#define _PAGE_PADDR_4V	  _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13]         */
#define _PAGE_IE_4V	  _AC(0x0000000000001000,UL) /* Invert Endianness    */
#define _PAGE_E_4V	  _AC(0x0000000000000800,UL) /* side-Effect          */
#define _PAGE_CP_4V	  _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
#define _PAGE_CV_4V	  _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
/* On M7, bit 9 is used to enable MCD corruption detection instead of CV */
#define _PAGE_MCD_4V      _AC(0x0000000000000200,UL) /* Memory Corruption    */
#define _PAGE_P_4V	  _AC(0x0000000000000100,UL) /* Privileged Page      */
#define _PAGE_EXEC_4V	  _AC(0x0000000000000080,UL) /* Executable Page      */
#define _PAGE_W_4V	  _AC(0x0000000000000040,UL) /* Writable             */
#define _PAGE_SOFT_4V	  _AC(0x0000000000000030,UL) /* Software bits        */
#define _PAGE_PRESENT_4V  _AC(0x0000000000000010,UL) /* Present              */
#define _PAGE_RESV_4V	  _AC(0x0000000000000008,UL) /* Reserved             */
#define _PAGE_SZ16GB_4V	  _AC(0x0000000000000007,UL) /* 16GB Page            */
#define _PAGE_SZ2GB_4V	  _AC(0x0000000000000006,UL) /* 2GB Page             */
#define _PAGE_SZ256MB_4V  _AC(0x0000000000000005,UL) /* 256MB Page           */
#define _PAGE_SZ32MB_4V	  _AC(0x0000000000000004,UL) /* 32MB Page            */
#define _PAGE_SZ4MB_4V	  _AC(0x0000000000000003,UL) /* 4MB Page             */
#define _PAGE_SZ512K_4V	  _AC(0x0000000000000002,UL) /* 512K Page            */
#define _PAGE_SZ64K_4V	  _AC(0x0000000000000001,UL) /* 64K Page             */
#define _PAGE_SZ8K_4V	  _AC(0x0000000000000000,UL) /* 8K Page              */
#define _PAGE_SZALL_4V	  _AC(0x0000000000000007,UL) /* All pgsz bits        */

#define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
#define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V

#if REAL_HPAGE_SHIFT != 22
#error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up
#endif

#define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
#define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V
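
/* The default huge TTE size is 4MB: 1UL << REAL_HPAGE_SHIFT == 1UL << 22
 * == 4MB, which is why the build-time check above ties REAL_HPAGE_SHIFT
 * to the _PAGE_SZ4MB_{4U,4V} encodings selected here.
 */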

/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
#define __P000	__pgprot(0)
#define __P001	__pgprot(0)
#define __P010	__pgprot(0)
#define __P011	__pgprot(0)
#define __P100	__pgprot(0)
#define __P101	__pgprot(0)
#define __P110	__pgprot(0)
#define __P111	__pgprot(0)

#define __S000	__pgprot(0)
#define __S001	__pgprot(0)
#define __S010	__pgprot(0)
#define __S011	__pgprot(0)
#define __S100	__pgprot(0)
#define __S101	__pgprot(0)
#define __S110	__pgprot(0)
#define __S111	__pgprot(0)

#ifndef __ASSEMBLY__

pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);

unsigned long pte_sz_bits(unsigned long size);

extern pgprot_t PAGE_KERNEL;
extern pgprot_t PAGE_KERNEL_LOCKED;
extern pgprot_t PAGE_COPY;
extern pgprot_t PAGE_SHARED;

/* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
extern unsigned long _PAGE_IE;
extern unsigned long _PAGE_E;
extern unsigned long _PAGE_CACHE;

extern unsigned long pg_iobits;
extern unsigned long _PAGE_ALL_SZ_BITS;

extern struct page *mem_map_zero;
#define ZERO_PAGE(vaddr)	(mem_map_zero)

/* PFNs are real physical page numbers.  However, mem_map only begins to record
 * per-page information starting at pfn_base.  This is to handle systems where
 * the first physical page in the machine is at some huge physical address,
 * such as 4GB.   This is common on a partitioned E10000, for example.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long paddr = pfn << PAGE_SHIFT;

	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
	return __pte(paddr | pgprot_val(prot));
}
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte = pfn_pte(page_nr, pgprot);

	return __pmd(pte_val(pte));
}
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
#endif

/* This one can be done with two shifts.  */
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long ret;

	__asm__ __volatile__(
	"\n661:	sllx		%1, %2, %0\n"
	"	srlx		%0, %3, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sllx		%1, %4, %0\n"
	"	srlx		%0, %5, %0\n"
	"	.previous\n"
	: "=r" (ret)
	: "r" (pte_val(pte)),
	  "i" (21), "i" (21 + PAGE_SHIFT),
	  "i" (8), "i" (8 + PAGE_SHIFT));

	return ret;
}
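
/* How the two shifts above work (illustrative): on sun4u the physical
 * address occupies pte bits 42:13 (_PAGE_PADDR_4U), so shifting left by
 * 21 discards everything above bit 42 and shifting right by
 * 21 + PAGE_SHIFT leaves just the pfn.  On sun4v the paddr field is bits
 * 55:13 (_PAGE_PADDR_4V), hence the 8 / 8 + PAGE_SHIFT pair.  The
 * .sun4v_2insn_patch entry records the address of the two sun4u
 * instructions so the early boot code can overwrite them with the sun4v
 * variants when running under the hypervisor.
 */
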
#define pte_page(x) pfn_to_page(pte_pfn(x))

static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
	unsigned long mask, tmp;

	/* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
	 * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
	 *
	 * Even if we use negation tricks the result is still a 6
	 * instruction sequence, so don't try to play fancy and just
	 * do the most straightforward implementation.
	 *
	 * Note: We encode this into 3 sun4v 2-insn patch sequences.
	 */

	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
	__asm__ __volatile__(
	"\n661:	sethi		%%uhi(%2), %1\n"
	"	sethi		%%hi(%2), %0\n"
	"\n662:	or		%1, %%ulo(%2), %1\n"
	"	or		%0, %%lo(%2), %0\n"
	"\n663:	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%3), %1\n"
	"	sethi		%%hi(%3), %0\n"
	"	.word		662b\n"
	"	or		%1, %%ulo(%3), %1\n"
	"	or		%0, %%lo(%3), %0\n"
	"	.word		663b\n"
	"	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.previous\n"
	"	.section	.sun_m7_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sethi		%%hi(%4), %0\n"
	"	.word		662b\n"
	"	or		%1, %%ulo(%4), %1\n"
	"	or		%0, %%lo(%4), %0\n"
	"	.word		663b\n"
	"	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (mask), "=r" (tmp)
	: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V),
	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
	       _PAGE_CP_4V | _PAGE_E_4V |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));

	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
}
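
/* Put differently (illustrative summary): the mask built above keeps the
 * physical address, the modified/accessed bits, the cacheability and
 * side-effect bits, _PAGE_SPECIAL, _PAGE_PMD_HUGE and the page size
 * field, so pte_modify() only allows the protection-related bits to be
 * rewritten from the new pgprot.
 */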

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_modify(pte, newprot);

	return __pmd(pte_val(pte));
}
#endif

static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	unsigned long val = pgprot_val(prot);

	__asm__ __volatile__(
	"\n661:	andn		%0, %2, %0\n"
	"	or		%0, %3, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	andn		%0, %4, %0\n"
	"	or		%0, %5, %0\n"
	"	.previous\n"
	"	.section	.sun_m7_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	andn		%0, %6, %0\n"
	"	or		%0, %5, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
	             "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V),
	             "i" (_PAGE_CP_4V));

	return __pgprot(val);
}
/* Various pieces of code check for platform support by ifdef testing
 * on "pgprot_noncached".  That's broken and should be fixed, but for
 * now...
 */
#define pgprot_noncached pgprot_noncached

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte
static inline unsigned long __pte_default_huge_mask(void)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	sethi		%%uhi(%1), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	mov		%2, %0\n"
	"	nop\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));

	return mask;
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | __pte_default_huge_mask());
}

static inline bool is_default_hugetlb_pte(pte_t pte)
{
	unsigned long mask = __pte_default_huge_mask();

	return (pte_val(pte) & mask) == mask;
}

static inline bool is_hugetlb_pmd(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PMD_HUGE);
}

static inline bool is_hugetlb_pud(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PUD_HUGE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkhuge(pte);
	pte_val(pte) |= _PAGE_PMD_HUGE;

	return __pmd(pte_val(pte));
}
#endif
#else
static inline bool is_hugetlb_pte(pte_t pte)
{
	return false;
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	or		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	or		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	andn		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	andn		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	unsigned long val = pte_val(pte), mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

	return __pte(val | mask);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	andn		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	andn		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
	  "i" (_PAGE_WRITE_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkold(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	mask |= _PAGE_R;

	return __pte(pte_val(pte) & ~mask);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	mask |= _PAGE_R;

	return __pte(pte_val(pte) | mask);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

static inline pte_t pte_mkmcd(pte_t pte)
{
	pte_val(pte) |= _PAGE_MCD_4V;
	return pte;
}

static inline pte_t pte_mknotmcd(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_MCD_4V;
	return pte;
}

static inline unsigned long pte_young(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_dirty(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_write(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_exec(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	sethi		%%hi(%1), %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	mov		%2, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_present(pte_t pte)
{
	unsigned long val = pte_val(pte);

	__asm__ __volatile__(
	"\n661:	and		%0, %2, %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	and		%0, %3, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));

	return val;
}

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	return pte_val(a) & _PAGE_VALID;
}

static inline unsigned long pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

#define pmd_leaf	pmd_large
static inline unsigned long pmd_large(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_pfn(pte);
}

#define pmd_write pmd_write
static inline unsigned long pmd_write(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_write(pte);
}

#define pud_write(pud)	pte_write(__pte(pud_val(pud)))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline unsigned long pmd_dirty(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_dirty(pte);
}

static inline unsigned long pmd_young(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_young(pte);
}

static inline unsigned long pmd_trans_huge(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkold(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_wrprotect(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkdirty(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkclean(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkyoung(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkwrite(pte);

	return __pmd(pte_val(pte));
}

static inline pgprot_t pmd_pgprot(pmd_t entry)
{
	unsigned long val = pmd_val(entry);

	return __pgprot(val);
}
#endif

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != 0UL;
}

#define pmd_none(pmd)			(!pmd_val(pmd))

/* pmd_bad() is only called on non-trans-huge PMDs.  Our encoding is
 * very simple, it's just the physical address.  PTE tables are of
 * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
 * the top bits outside of the range of any physical address size we
 * support are clear as well.  We also validate the physical address itself.
 */
#define pmd_bad(pmd)			(pmd_val(pmd) & ~PAGE_MASK)

#define pud_none(pud)			(!pud_val(pud))

#define pud_bad(pud)			(pud_val(pud) & ~PAGE_MASK)

#define p4d_none(p4d)			(!p4d_val(p4d))

#define p4d_bad(p4d)			(p4d_val(p4d) & ~PAGE_MASK)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd);
#else
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}
#endif

static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	unsigned long val = __pa((unsigned long) (ptep));

	pmd_val(*pmdp) = val;
}

#define pud_set(pudp, pmdp)	\
	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));
	unsigned long pfn;

	pfn = pte_pfn(pte);

	return ((unsigned long) __va(pfn << PAGE_SHIFT));
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));
	unsigned long pfn;

	pfn = pte_pfn(pte);

	return ((pmd_t *) __va(pfn << PAGE_SHIFT));
}

#define pmd_page(pmd) 			virt_to_page((void *)pmd_page_vaddr(pmd))
#define pud_page(pud)			virt_to_page((void *)pud_pgtable(pud))
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
#define pud_present(pud)		(pud_val(pud) != 0U)
#define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
#define p4d_pgtable(p4d)		\
	((pud_t *) __va(p4d_val(p4d)))
#define p4d_present(p4d)		(p4d_val(p4d) != 0U)
#define p4d_clear(p4dp)			(p4d_val(*(p4dp)) = 0UL)

/* only used by the stubbed out hugetlb gup code, should never be called */
#define p4d_page(p4d)			NULL

#define pud_leaf	pud_large
static inline unsigned long pud_large(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));

	return pte_pfn(pte);
}

/* Same in both SUN4V and SUN4U.  */
#define pte_none(pte) 			(!pte_val(pte))

#define p4d_set(p4dp, pudp)	\
	(p4d_val(*(p4dp)) = (__pa((unsigned long) (pudp))))

/* We cannot include <linux/mm_types.h> at this point yet: */
extern struct mm_struct init_mm;

/* Actual page table PTE updates.  */
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift);

static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
				pte_t *ptep, pte_t orig, int fullmm,
				unsigned int hugepage_shift)
{
	/* It is more efficient to let flush_tlb_kernel_range()
	 * handle init_mm tlb flushes.
	 *
	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
	 *             and SUN4V pte layout, so this inline test is fine.
	 */
	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
		tlb_batch_add(mm, vaddr, ptep, orig, fullmm, hugepage_shift);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	set_pmd_at(mm, addr, pmdp, __pmd(0UL));
	return pmd;
}

static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep, pte_t pte, int fullmm)
{
	pte_t orig = *ptep;

	*ptep = pte;
	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT);
}

#define set_pte_at(mm,addr,ptep,pte)	\
	__set_pte_at((mm), (addr), (ptep), (pte), 0)

#define pte_clear(mm,addr,ptep)		\
	set_pte_at((mm), (addr), (ptep), __pte(0UL))

#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(mm,addr,ptep,fullmm)	\
	__set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))

#ifdef DCACHE_ALIASING_POSSIBLE
#define __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)				\
({									\
	pte_t newpte = (pte);						\
	if (tlb_type != hypervisor && pte_present(pte)) {		\
		unsigned long this_pfn = pte_pfn(pte);			\
									\
		if (pfn_valid(this_pfn) &&				\
		    (((old_addr) ^ (new_addr)) & (1 << 13)))		\
			flush_dcache_page_all(current->mm,		\
					      pfn_to_page(this_pfn));	\
	}								\
	newpte;								\
})
#endif
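
/* Note on the (1 << 13) test above (assumption about intent): with 8K
 * base pages, bit 13 is the lowest virtual address bit that selects a
 * D-cache color on cpus where DCACHE_ALIASING_POSSIBLE is set, so a page
 * whose mapping moves between addresses differing in that bit may leave
 * stale cache aliases behind, hence the flush.
 */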

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

void paging_init(void);
unsigned long find_ecache_flush_span(unsigned long size);

struct seq_file;
void mmu_info(struct seq_file *);

struct vm_area_struct;
void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0xffUL)
#define __swp_offset(entry)	((entry).val >> (PAGE_SHIFT + 8UL))
#define __swp_entry(type, offset)	\
	( (swp_entry_t) \
	  { \
		(((long)(type) << PAGE_SHIFT) | \
                 ((long)(offset) << (PAGE_SHIFT + 8UL))) \
	  } )
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
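
/* Layout sketch (illustrative, PAGE_SHIFT == 13): a swap entry keeps the
 * low 13 bits clear, stores the 8-bit swap type in bits 20:13 and the
 * offset in the bits above that, which is exactly what __swp_type() and
 * __swp_offset() extract again.
 */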

int page_in_phys_avail(unsigned long paddr);

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffffffffffUL)
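
/* Worked example (illustrative): MK_IOSPACE_PFN(2UL, 0x1234UL) yields
 * 0x2000000000001234UL, from which GET_IOSPACE() recovers 2 and GET_PFN()
 * recovers 0x1234.  io_remap_pfn_range() below then rebuilds the full bus
 * address as (GET_PFN(pfn) << PAGE_SHIFT) | ((unsigned long) space << 32).
 */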

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pte_t pte);

int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
		  unsigned long addr, pte_t oldpte);

#define __HAVE_ARCH_DO_SWAP_PAGE
static inline void arch_do_swap_page(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long addr,
				     pte_t pte, pte_t oldpte)
{
	/* If this is a new page being mapped in, there can be no
	 * ADI tags stored away for this page.  Skip looking for
	 * stored tags.
	 */
	if (pte_none(oldpte))
		return;

	if (adi_state.enabled && (pte_val(pte) & _PAGE_MCD_4V))
		adi_restore_tags(mm, vma, addr, pte);
}

#define __HAVE_ARCH_UNMAP_ONE
static inline int arch_unmap_one(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long addr, pte_t oldpte)
{
	if (adi_state.enabled && (pte_val(oldpte) & _PAGE_MCD_4V))
		return adi_save_tags(mm, vma, addr, oldpte);
	return 0;
}

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
	int space = GET_IOSPACE(pfn);
	unsigned long phys_base;

	phys_base = offset | (((unsigned long) space) << 32UL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range

static inline unsigned long __untagged_addr(unsigned long start)
{
	if (adi_capable()) {
		long addr = start;

		/* If userspace has passed a versioned address, the kernel
		 * will not find it in the VMAs since it does not store
		 * the version tags in the list of VMAs.  Storing version
		 * tags in the list of VMAs is impractical since they can
		 * be changed any time from userspace without dropping into
		 * the kernel.  Any address search in the VMAs is therefore
		 * done with non-versioned addresses.  Ensure the ADI version
		 * bits are dropped here by sign extending the last bit
		 * before the ADI bits.  The IOMMU does not implement
		 * version tags.
		 */
		return (addr << (long)adi_nbits()) >> (long)adi_nbits();
	}

	return start;
}
#define untagged_addr(addr) \
	((__typeof__(addr))(__untagged_addr((unsigned long)(addr))))
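
/* Illustrative example, assuming the 4-bit ADI version tags of M7-class
 * cpus (i.e. adi_nbits() == 4): for a tagged address such as
 * 0x3000001234567000UL, "(addr << 4) >> 4" on a signed long sign-extends
 * bit 59 back over bits 63:60 and returns 0x0000001234567000UL, the same
 * untagged address that the VMA lookups use.
 */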

static inline bool pte_access_permitted(pte_t pte, bool write)
{
	u64 prot;

	if (tlb_type == hypervisor) {
		prot = _PAGE_PRESENT_4V | _PAGE_P_4V;
		if (write)
			prot |= _PAGE_WRITE_4V;
	} else {
		prot = _PAGE_PRESENT_4U | _PAGE_P_4U;
		if (write)
			prot |= _PAGE_WRITE_4U;
	}

	return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot;
}
#define pte_access_permitted pte_access_permitted

#include <asm/tlbflush.h>

/* We provide our own get_unmapped_area to cope with VA holes and
 * SHM area cache aliasing for userland.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 * the largest alignment possible such that larger PTEs can be used.
 */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
				   unsigned long, unsigned long,
				   unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA

void sun4v_register_fault_status(void);
void sun4v_ktsb_register(void);
void __init cheetah_ecache_flush_init(void);
void sun4v_patch_tlb_handlers(void);

extern unsigned long cmdline_memory_size;

asmlinkage void do_sparc64_fault(struct pt_regs *regs);

#define pmd_pgtable(PMD)	((pte_t *)pmd_page_vaddr(PMD))

#ifdef CONFIG_HUGETLB_PAGE

#define pud_leaf_size pud_leaf_size
extern unsigned long pud_leaf_size(pud_t pud);

#define pmd_leaf_size pmd_leaf_size
extern unsigned long pmd_leaf_size(pmd_t pmd);

#define pte_leaf_size pte_leaf_size
extern unsigned long pte_leaf_size(pte_t pte);

#endif /* CONFIG_HUGETLB_PAGE */

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_PGTABLE_H) */