/* SPDX-License-Identifier: GPL-2.0 */
/*
 * pgtable.h: SpitFire page table operations.
 *
 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#ifndef _SPARC64_PGTABLE_H
#define _SPARC64_PGTABLE_H

/* This file contains the functions and defines necessary to modify and use
 * the SpitFire page tables.
 */

#include <asm-generic/pgtable-nop4d.h>
#include <linux/compiler.h>
#include <linux/const.h>
#include <asm/types.h>
#include <asm/spitfire.h>
#include <asm/asi.h>
#include <asm/adi.h>
#include <asm/page.h>
#include <asm/processor.h>

/* The kernel image occupies 0x400000 to 0x6000000 (4MB --> 96MB).
 * The page copy blockops can use 0x6000000 to 0x8000000.
 * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
 * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
 * The vmalloc area spans 0x100000000 to 0x200000000.
 * Since modules need to be in the lowest 32-bits of the address space,
 * we place them right before the OBP area from 0x10000000 to 0xf0000000.
 * There is a single static kernel PMD which maps from 0x0 to address
 * 0x400000000.
 */
#define TLBTEMP_BASE		_AC(0x0000000006000000,UL)
#define TSBMAP_8K_BASE		_AC(0x0000000008000000,UL)
#define TSBMAP_4M_BASE		_AC(0x0000000008400000,UL)
#define MODULES_VADDR		_AC(0x0000000010000000,UL)
#define MODULES_LEN		_AC(0x00000000e0000000,UL)
#define MODULES_END		_AC(0x00000000f0000000,UL)
#define LOW_OBP_ADDRESS		_AC(0x00000000f0000000,UL)
#define HI_OBP_ADDRESS		_AC(0x0000000100000000,UL)
#define VMALLOC_START		_AC(0x0000000100000000,UL)
#define VMEMMAP_BASE		VMALLOC_END
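
/* Note that VMALLOC_END is not a compile-time constant here: it is a
 * variable (declared below) that is set up during boot, and the vmemmap
 * is placed immediately after the vmalloc area.
 */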

/* PMD_SHIFT determines the size of the area a second-level page
 * table can map
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PMD_BITS	(PAGE_SHIFT - 3)

/* PUD_SHIFT determines the size of the area a third-level page
 * table can map
 */
#define PUD_SHIFT	(PMD_SHIFT + PMD_BITS)
#define PUD_SIZE	(_AC(1,UL) << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PUD_BITS	(PAGE_SHIFT - 3)

/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
#define PGDIR_SHIFT	(PUD_SHIFT + PUD_BITS)
#define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PGDIR_BITS	(PAGE_SHIFT - 3)
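
/* Worked example: with 8K pages (PAGE_SHIFT = 13) each level
 * contributes PAGE_SHIFT - 3 = 10 bits (1024 eight-byte entries per
 * page-sized table), so PMD_SHIFT = 23 (a PMD maps 8MB), PUD_SHIFT = 33
 * (a PUD maps 8GB) and PGDIR_SHIFT = 43 (a PGD entry maps 8TB).  The
 * four levels together cover 43 + 10 = 53 bits of virtual address,
 * which is exactly what the sanity check below insists on.
 */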

#if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS)
#error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support
#endif

#if (PGDIR_SHIFT + PGDIR_BITS) != 53
#error Page table parameters do not cover virtual address space properly.
#endif

#if (PMD_SHIFT != HPAGE_SHIFT)
#error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
#endif

#ifndef __ASSEMBLY__

extern unsigned long VMALLOC_END;

#define vmemmap		((struct page *)VMEMMAP_BASE)

#include <linux/sched.h>
#include <asm/tlbflush.h>

bool kern_addr_valid(unsigned long addr);

/* Entries per page directory level. */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << PMD_BITS)
#define PTRS_PER_PUD	(1UL << PUD_BITS)
#define PTRS_PER_PGD	(1UL << PGDIR_BITS)

#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
	       __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n", \
	       __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n", \
	       __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))

#endif /* !(__ASSEMBLY__) */

/* PTE bits which are the same in SUN4U and SUN4V format. */
#define _PAGE_VALID	_AC(0x8000000000000000,UL) /* Valid TTE */
#define _PAGE_R		_AC(0x8000000000000000,UL) /* Keep ref bit uptodate */
#define _PAGE_SPECIAL	_AC(0x0200000000000000,UL) /* Special page */
#define _PAGE_PMD_HUGE	_AC(0x0100000000000000,UL) /* Huge page */
#define _PAGE_PUD_HUGE	_PAGE_PMD_HUGE

/* SUN4U pte bits... */
#define _PAGE_SZ4MB_4U	  _AC(0x6000000000000000,UL) /* 4MB Page */
#define _PAGE_SZ512K_4U	  _AC(0x4000000000000000,UL) /* 512K Page */
#define _PAGE_SZ64K_4U	  _AC(0x2000000000000000,UL) /* 64K Page */
#define _PAGE_SZ8K_4U	  _AC(0x0000000000000000,UL) /* 8K Page */
#define _PAGE_NFO_4U	  _AC(0x1000000000000000,UL) /* No Fault Only */
#define _PAGE_IE_4U	  _AC(0x0800000000000000,UL) /* Invert Endianness */
#define _PAGE_SOFT2_4U	  _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
#define _PAGE_SPECIAL_4U  _AC(0x0200000000000000,UL) /* Special page */
#define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page */
#define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL) /* Reserved */
#define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL) /* (Panther) 32MB page */
#define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
#define _PAGE_SZALL_4U	  _AC(0x6001000000000000,UL) /* All pgsz bits */
#define _PAGE_SN_4U	  _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */
#define _PAGE_RES2_4U	  _AC(0x0000780000000000,UL) /* Reserved */
#define _PAGE_PADDR_4U	  _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13] */
#define _PAGE_SOFT_4U	  _AC(0x0000000000001F80,UL) /* Software bits: */
#define _PAGE_EXEC_4U	  _AC(0x0000000000001000,UL) /* Executable SW bit */
#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty) */
#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd) */
#define _PAGE_READ_4U	  _AC(0x0000000000000200,UL) /* Readable SW Bit */
#define _PAGE_WRITE_4U	  _AC(0x0000000000000100,UL) /* Writable SW Bit */
#define _PAGE_PRESENT_4U  _AC(0x0000000000000080,UL) /* Present */
#define _PAGE_L_4U	  _AC(0x0000000000000040,UL) /* Locked TTE */
#define _PAGE_CP_4U	  _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
#define _PAGE_CV_4U	  _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
#define _PAGE_E_4U	  _AC(0x0000000000000008,UL) /* side-Effect */
#define _PAGE_P_4U	  _AC(0x0000000000000004,UL) /* Privileged Page */
#define _PAGE_W_4U	  _AC(0x0000000000000002,UL) /* Writable */

/* SUN4V pte bits... */
#define _PAGE_NFO_4V	  _AC(0x4000000000000000,UL) /* No Fault Only */
#define _PAGE_SOFT2_4V	  _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty) */
#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd) */
#define _PAGE_READ_4V	  _AC(0x0800000000000000,UL) /* Readable SW Bit */
#define _PAGE_WRITE_4V	  _AC(0x0400000000000000,UL) /* Writable SW Bit */
#define _PAGE_SPECIAL_4V  _AC(0x0200000000000000,UL) /* Special page */
#define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL) /* Huge page */
#define _PAGE_PADDR_4V	  _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13] */
#define _PAGE_IE_4V	  _AC(0x0000000000001000,UL) /* Invert Endianness */
#define _PAGE_E_4V	  _AC(0x0000000000000800,UL) /* side-Effect */
#define _PAGE_CP_4V	  _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
#define _PAGE_CV_4V	  _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
/* On M7, bit 9 (_PAGE_CV_4V above) is instead used to enable MCD corruption detection */
#define _PAGE_MCD_4V	  _AC(0x0000000000000200,UL) /* Memory Corruption */
#define _PAGE_P_4V	  _AC(0x0000000000000100,UL) /* Privileged Page */
#define _PAGE_EXEC_4V	  _AC(0x0000000000000080,UL) /* Executable Page */
#define _PAGE_W_4V	  _AC(0x0000000000000040,UL) /* Writable */
#define _PAGE_SOFT_4V	  _AC(0x0000000000000030,UL) /* Software bits */
#define _PAGE_PRESENT_4V  _AC(0x0000000000000010,UL) /* Present */
#define _PAGE_RESV_4V	  _AC(0x0000000000000008,UL) /* Reserved */
#define _PAGE_SZ16GB_4V	  _AC(0x0000000000000007,UL) /* 16GB Page */
#define _PAGE_SZ2GB_4V	  _AC(0x0000000000000006,UL) /* 2GB Page */
#define _PAGE_SZ256MB_4V  _AC(0x0000000000000005,UL) /* 256MB Page */
#define _PAGE_SZ32MB_4V	  _AC(0x0000000000000004,UL) /* 32MB Page */
#define _PAGE_SZ4MB_4V	  _AC(0x0000000000000003,UL) /* 4MB Page */
#define _PAGE_SZ512K_4V	  _AC(0x0000000000000002,UL) /* 512K Page */
#define _PAGE_SZ64K_4V	  _AC(0x0000000000000001,UL) /* 64K Page */
#define _PAGE_SZ8K_4V	  _AC(0x0000000000000000,UL) /* 8K Page */
#define _PAGE_SZALL_4V	  _AC(0x0000000000000007,UL) /* All pgsz bits */

#define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
#define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V

#if REAL_HPAGE_SHIFT != 22
#error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up
#endif

#define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
#define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V

/* We borrow bit 20 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	_AC(0x0000000000100000, UL)

#ifndef __ASSEMBLY__

pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);

unsigned long pte_sz_bits(unsigned long size);

extern pgprot_t PAGE_KERNEL;
extern pgprot_t PAGE_KERNEL_LOCKED;
extern pgprot_t PAGE_COPY;
extern pgprot_t PAGE_SHARED;

/* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
extern unsigned long _PAGE_IE;
extern unsigned long _PAGE_E;
extern unsigned long _PAGE_CACHE;

extern unsigned long pg_iobits;
extern unsigned long _PAGE_ALL_SZ_BITS;

extern struct page *mem_map_zero;
#define ZERO_PAGE(vaddr)	(mem_map_zero)

/* PFNs are real physical page numbers.  However, mem_map only begins to record
 * per-page information starting at pfn_base.  This is to handle systems where
 * the first physical page in the machine is at some huge physical address,
 * such as 4GB.  This is common on a partitioned E10000, for example.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long paddr = pfn << PAGE_SHIFT;

	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
	return __pte(paddr | pgprot_val(prot));
}
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
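
/* Illustrative sketch (not an API defined here): a kernel mapping for a
 * page frame could be built as
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *
 * pfn_pte() just shifts the pfn into the physical address field and ORs
 * in the protection bits; the BUILD_BUG_ON above guarantees that no
 * extra page-size bits need to be ORed in for the base 8K size.
 */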

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte = pfn_pte(page_nr, pgprot);

	return __pmd(pte_val(pte));
}
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
#endif

/* This one can be done with two shifts. */
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long ret;

	__asm__ __volatile__(
	"\n661:	sllx %1, %2, %0\n"
	"	srlx %0, %3, %0\n"
	"	.section .sun4v_2insn_patch, \"ax\"\n"
	"	.word 661b\n"
	"	sllx %1, %4, %0\n"
	"	srlx %0, %5, %0\n"
	"	.previous\n"
	: "=r" (ret)
	: "r" (pte_val(pte)),
	  "i" (21), "i" (21 + PAGE_SHIFT),
	  "i" (8), "i" (8 + PAGE_SHIFT));

	return ret;
}
#define pte_page(x)	pfn_to_page(pte_pfn(x))
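
/* Why two shifts suffice: on sun4u the physical address lives in
 * pa[42:13], so shifting left by 21 discards bits 63:43 and shifting
 * right by 21 + PAGE_SHIFT brings the pfn down to bit 0.  On sun4v the
 * field is paddr[55:13], hence the 8 / 8 + PAGE_SHIFT pair.  The
 * .sun4v_2insn_patch section records the address of the sun4u
 * instruction pair (label 661) so early boot code can overwrite it
 * with the sun4v instructions that follow; this patching idiom recurs
 * throughout this file.
 */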

static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
	unsigned long mask, tmp;

	/* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
	 * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
	 *
	 * Even if we use negation tricks the result is still a 6
	 * instruction sequence, so don't try to play fancy and just
	 * do the most straightforward implementation.
	 *
	 * Note: We encode this into 3 sun4v 2-insn patch sequences.
	 */

	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
	__asm__ __volatile__(
	"\n661:	sethi %%uhi(%2), %1\n"
	"	sethi %%hi(%2), %0\n"
	"\n662:	or %1, %%ulo(%2), %1\n"
	"	or %0, %%lo(%2), %0\n"
	"\n663:	sllx %1, 32, %1\n"
	"	or %0, %1, %0\n"
	"	.section .sun4v_2insn_patch, \"ax\"\n"
	"	.word 661b\n"
	"	sethi %%uhi(%3), %1\n"
	"	sethi %%hi(%3), %0\n"
	"	.word 662b\n"
	"	or %1, %%ulo(%3), %1\n"
	"	or %0, %%lo(%3), %0\n"
	"	.word 663b\n"
	"	sllx %1, 32, %1\n"
	"	or %0, %1, %0\n"
	"	.previous\n"
	"	.section .sun_m7_2insn_patch, \"ax\"\n"
	"	.word 661b\n"
	"	sethi %%uhi(%4), %1\n"
	"	sethi %%hi(%4), %0\n"
	"	.word 662b\n"
	"	or %1, %%ulo(%4), %1\n"
	"	or %0, %%lo(%4), %0\n"
	"	.word 663b\n"
	"	sllx %1, 32, %1\n"
	"	or %0, %1, %0\n"
	"	.previous\n"
	: "=r" (mask), "=r" (tmp)
	: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V),
	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
	       _PAGE_CP_4V | _PAGE_E_4V |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));

	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
}
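
/* Illustrative use: protection changes (e.g. from mprotect()) reduce to
 *
 *	pte = pte_modify(pte, newprot);
 *
 * The mask computed above keeps the physical address, dirty/accessed
 * state, cacheability/side-effect and page-size bits of the old PTE
 * and takes everything else from the new protections.
 */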

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_modify(pte, newprot);

	return __pmd(pte_val(pte));
}
#endif

static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	unsigned long val = pgprot_val(prot);

	__asm__ __volatile__(
	"\n661:	andn %0, %2, %0\n"
	"	or %0, %3, %0\n"
	"	.section .sun4v_2insn_patch, \"ax\"\n"
	"	.word 661b\n"
	"	andn %0, %4, %0\n"
	"	or %0, %5, %0\n"
	"	.previous\n"
	"	.section .sun_m7_2insn_patch, \"ax\"\n"
	"	.word 661b\n"
	"	andn %0, %6, %0\n"
	"	or %0, %5, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
	  "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V),
	  "i" (_PAGE_CP_4V));

	return __pgprot(val);
}
/* Various pieces of code check for platform support by ifdef testing
 * on "pgprot_noncached".  That's broken and should be fixed, but for
 * now...
 */
#define pgprot_noncached pgprot_noncached
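
/* Typical (illustrative) use, e.g. in a driver's mmap method:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *
 * On sun4u this clears both cacheability bits and sets the side-effect
 * bit; the patch sections substitute the sun4v and M7 equivalents at
 * boot.
 */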

static inline unsigned long pte_dirty(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov %1, %0\n"
	"	nop\n"
	"	.section .sun4v_2insn_patch, \"ax\"\n"
	"	.word 661b\n"
	"	sethi %%uhi(%2), %0\n"
	"	sllx %0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_write(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov %1, %0\n"
	"	nop\n"
	"	.section .sun4v_2insn_patch, \"ax\"\n"
	"	.word 661b\n"
	"	sethi %%uhi(%2), %0\n"
	"	sllx %0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

	return (pte_val(pte) & mask);
}

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte
static inline unsigned long __pte_default_huge_mask(void)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	sethi %%uhi(%1), %0\n"
	"	sllx %0, 32, %0\n"
	"	.section .sun4v_2insn_patch, \"ax\"\n"
	"	.word 661b\n"
	"	mov %2, %0\n"
	"	nop\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));

	return mask;
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | __pte_default_huge_mask());
}

static inline bool is_default_hugetlb_pte(pte_t pte)
{
	unsigned long mask = __pte_default_huge_mask();

	return (pte_val(pte) & mask) == mask;
}

static inline bool is_hugetlb_pmd(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PMD_HUGE);
}

static inline bool is_hugetlb_pud(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PUD_HUGE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkhuge(pte);
	pte_val(pte) |= _PAGE_PMD_HUGE;

	return __pmd(pte_val(pte));
}
#endif
#else
static inline bool is_hugetlb_pte(pte_t pte)
{
	return false;
}
#endif

static inline pte_t __pte_mkhwwrite(pte_t pte)
{
	unsigned long val = pte_val(pte);

	/*
	 * Note: we only want to set the HW writable bit if the SW writable bit
	 * and the SW dirty bit are set.
	 */
	__asm__ __volatile__(
	"\n661:	or %0, %2, %0\n"
	"	.section .sun4v_1insn_patch, \"ax\"\n"
	"	.word 661b\n"
	"	or %0, %3, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_W_4U), "i" (_PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	unsigned long val = pte_val(pte), mask;

	__asm__ __volatile__(
	"\n661:	mov %1, %0\n"
	"	nop\n"
	"	.section .sun4v_2insn_patch, \"ax\"\n"
	"	.word 661b\n"
	"	sethi %%uhi(%2), %0\n"
	"	sllx %0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));

	pte = __pte(val | mask);
	return pte_write(pte) ? __pte_mkhwwrite(pte) : pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	andn %0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section .sun4v_2insn_patch, \"ax\"\n"
	"	.word 661b\n"
	"	sethi %%uhi(%4), %1\n"
	"	sllx %1, 32, %1\n"
	"	.word 662b\n"
	"	or %1, %%lo(%4), %1\n"
	"	andn %0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	unsigned long val = pte_val(pte), mask;

	__asm__ __volatile__(
	"\n661:	mov %1, %0\n"
	"	nop\n"
	"	.section .sun4v_2insn_patch, \"ax\"\n"
	"	.word 661b\n"
	"	sethi %%uhi(%2), %0\n"
	"	sllx %0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

	pte = __pte(val | mask);
	return pte_dirty(pte) ? __pte_mkhwwrite(pte) : pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	andn %0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section .sun4v_2insn_patch, \"ax\"\n"
	"	.word 661b\n"
	"	sethi %%uhi(%4), %1\n"
	"	sllx %1, 32, %1\n"
	"	.word 662b\n"
	"	or %1, %%lo(%4), %1\n"
	"	andn %0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
	  "i" (_PAGE_WRITE_4V | _PAGE_W_4V));

	return __pte(val);
}

static inline pte_t pte_mkold(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov %1, %0\n"
	"	nop\n"
	"	.section .sun4v_2insn_patch, \"ax\"\n"
	"	.word 661b\n"
	"	sethi %%uhi(%2), %0\n"
	"	sllx %0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	mask |= _PAGE_R;

	return __pte(pte_val(pte) & ~mask);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov %1, %0\n"
	"	nop\n"
	"	.section .sun4v_2insn_patch, \"ax\"\n"
	"	.word 661b\n"
	"	sethi %%uhi(%2), %0\n"
	"	sllx %0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	mask |= _PAGE_R;

	return __pte(pte_val(pte) | mask);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

static inline pte_t pte_mkmcd(pte_t pte)
{
	pte_val(pte) |= _PAGE_MCD_4V;
	return pte;
}

static inline pte_t pte_mknotmcd(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_MCD_4V;
	return pte;
}

static inline unsigned long pte_young(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov %1, %0\n"
	"	nop\n"
	"	.section .sun4v_2insn_patch, \"ax\"\n"
	"	.word 661b\n"
	"	sethi %%uhi(%2), %0\n"
	"	sllx %0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_exec(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	sethi %%hi(%1), %0\n"
	"	.section .sun4v_1insn_patch, \"ax\"\n"
	"	.word 661b\n"
	"	mov %2, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));

	return (pte_val(pte) & mask);
}

static inline unsigned long pte_present(pte_t pte)
{
	unsigned long val = pte_val(pte);

	__asm__ __volatile__(
	"\n661:	and %0, %2, %0\n"
	"	.section .sun4v_1insn_patch, \"ax\"\n"
	"	.word 661b\n"
	"	and %0, %3, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));

	return val;
}

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	return pte_val(a) & _PAGE_VALID;
}

static inline unsigned long pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

#define pmd_leaf	pmd_large
static inline unsigned long pmd_large(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_pfn(pte);
}

#define pmd_write pmd_write
static inline unsigned long pmd_write(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_write(pte);
}

#define pud_write(pud)	pte_write(__pte(pud_val(pud)))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline unsigned long pmd_dirty(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_dirty(pte);
}

#define pmd_young pmd_young
static inline unsigned long pmd_young(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_young(pte);
}

static inline unsigned long pmd_trans_huge(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkold(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_wrprotect(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkdirty(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkclean(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkyoung(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkwrite_novma(pte);

	return __pmd(pte_val(pte));
}

static inline pgprot_t pmd_pgprot(pmd_t entry)
{
	unsigned long val = pmd_val(entry);

	return __pgprot(val);
}
#endif

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != 0UL;
}

#define pmd_none(pmd)			(!pmd_val(pmd))

/* pmd_bad() is only called on non-trans-huge PMDs.  Our encoding is
 * very simple, it's just the physical address.  PTE tables are of
 * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
 * the top bits outside of the range of any physical address size we
 * support are clear as well.  We also validate the physical address itself.
 */
#define pmd_bad(pmd)			(pmd_val(pmd) & ~PAGE_MASK)

#define pud_none(pud)			(!pud_val(pud))

#define pud_bad(pud)			(pud_val(pud) & ~PAGE_MASK)

#define p4d_none(p4d)			(!p4d_val(p4d))

#define p4d_bad(p4d)			(p4d_val(p4d) & ~PAGE_MASK)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd);
#else
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}
#endif

static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	unsigned long val = __pa((unsigned long) (ptep));

	pmd_val(*pmdp) = val;
}

#define pud_set(pudp, pmdp)	\
	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));
	unsigned long pfn;

	pfn = pte_pfn(pte);

	return ((unsigned long) __va(pfn << PAGE_SHIFT));
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));
	unsigned long pfn;

	pfn = pte_pfn(pte);

	return ((pmd_t *) __va(pfn << PAGE_SHIFT));
}

#define pmd_page(pmd)			virt_to_page((void *)pmd_page_vaddr(pmd))
#define pud_page(pud)			virt_to_page((void *)pud_pgtable(pud))
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
#define pud_present(pud)		(pud_val(pud) != 0U)
#define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
#define p4d_pgtable(p4d)		\
	((pud_t *) __va(p4d_val(p4d)))
#define p4d_present(p4d)		(p4d_val(p4d) != 0U)
#define p4d_clear(p4dp)			(p4d_val(*(p4dp)) = 0UL)

/* only used by the stubbed out hugetlb gup code, should never be called */
#define p4d_page(p4d)			NULL

#define pud_leaf	pud_large
static inline unsigned long pud_large(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));

	return pte_pfn(pte);
}

/* Same in both SUN4V and SUN4U. */
#define pte_none(pte)			(!pte_val(pte))

#define p4d_set(p4dp, pudp)	\
	(p4d_val(*(p4dp)) = (__pa((unsigned long) (pudp))))

/* We cannot include <linux/mm_types.h> at this point yet: */
extern struct mm_struct init_mm;

/* Actual page table PTE updates. */
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift);

static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
				pte_t *ptep, pte_t orig, int fullmm,
				unsigned int hugepage_shift)
{
	/* It is more efficient to let flush_tlb_kernel_range()
	 * handle init_mm tlb flushes.
	 *
	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
	 * and SUN4V pte layout, so this inline test is fine.
	 */
	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
		tlb_batch_add(mm, vaddr, ptep, orig, fullmm, hugepage_shift);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	set_pmd_at(mm, addr, pmdp, __pmd(0UL));
	return pmd;
}

static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int fullmm)
{
	pte_t orig = *ptep;

	*ptep = pte;
	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT);
}

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte, unsigned int nr)
{
	arch_enter_lazy_mmu_mode();
	for (;;) {
		__set_pte_at(mm, addr, ptep, pte, 0);
		if (--nr == 0)
			break;
		ptep++;
		pte_val(pte) += PAGE_SIZE;
		addr += PAGE_SIZE;
	}
	arch_leave_lazy_mmu_mode();
}
#define set_ptes set_ptes
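
/* set_ptes() lays down nr consecutive PTEs for one mapping; because the
 * pfn sits in the PAGE_SIZE-aligned physical address field, simply
 * adding PAGE_SIZE to the pte value each iteration advances it to the
 * next page frame.
 */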

#define pte_clear(mm,addr,ptep)		\
	set_pte_at((mm), (addr), (ptep), __pte(0UL))

#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(mm,addr,ptep,fullmm)	\
	__set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))

#ifdef DCACHE_ALIASING_POSSIBLE
#define __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)				\
({									\
	pte_t newpte = (pte);						\
	if (tlb_type != hypervisor && pte_present(pte)) {		\
		unsigned long this_pfn = pte_pfn(pte);			\
									\
		if (pfn_valid(this_pfn) &&				\
		    (((old_addr) ^ (new_addr)) & (1 << 13)))		\
			flush_dcache_folio_all(current->mm,		\
				page_folio(pfn_to_page(this_pfn)));	\
	}								\
	newpte;								\
})
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

void paging_init(void);
unsigned long find_ecache_flush_span(unsigned long size);

struct seq_file;
void mmu_info(struct seq_file *);

struct vm_area_struct;
void update_mmu_cache_range(struct vm_fault *, struct vm_area_struct *,
		unsigned long addr, pte_t *ptep, unsigned int nr);
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			     pmd_t *pmdp);

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 *   <--------------------------- offset ---------------------------
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   --------------------> E <-- type ---> <------- zeroes -------->
 */
#define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0x7fUL)
#define __swp_offset(entry)	((entry).val >> (PAGE_SHIFT + 8UL))
#define __swp_entry(type, offset)	\
	( (swp_entry_t) \
	  { \
		((((long)(type) & 0x7fUL) << PAGE_SHIFT) | \
		 ((long)(offset) << (PAGE_SHIFT + 8UL))) \
	  } )
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
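
/* Worked example (PAGE_SHIFT == 13): __swp_entry() puts the 7-bit swap
 * type in bits 19:13 and the offset in bits 63:21, leaving bits 12:0
 * zero and bit 20 free for _PAGE_SWP_EXCLUSIVE -- exactly the layout
 * drawn above.  Since the present bits (_PAGE_PRESENT_4U/_4V) live in
 * the always-zero low bits, such a PTE is !pte_none() && !pte_present().
 */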

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

int page_in_phys_avail(unsigned long paddr);

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffffffffffUL)
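
/* Worked example: with BITS_PER_LONG == 64 the iospace occupies bits
 * 63:60, so MK_IOSPACE_PFN(2UL, 0x1000UL) yields 0x2000000000001000 and
 * GET_IOSPACE()/GET_PFN() recover 2 and 0x1000 again.
 */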

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pte_t pte);

int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
		  unsigned long addr, pte_t oldpte);

#define __HAVE_ARCH_DO_SWAP_PAGE
static inline void arch_do_swap_page(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long addr,
				     pte_t pte, pte_t oldpte)
{
	/* If this is a new page being mapped in, there can be no
	 * ADI tags stored away for this page.  Skip looking for
	 * stored tags
	 */
	if (pte_none(oldpte))
		return;

	if (adi_state.enabled && (pte_val(pte) & _PAGE_MCD_4V))
		adi_restore_tags(mm, vma, addr, pte);
}

#define __HAVE_ARCH_UNMAP_ONE
static inline int arch_unmap_one(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long addr, pte_t oldpte)
{
	if (adi_state.enabled && (pte_val(oldpte) & _PAGE_MCD_4V))
		return adi_save_tags(mm, vma, addr, oldpte);
	return 0;
}

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
	int space = GET_IOSPACE(pfn);
	unsigned long phys_base;

	phys_base = offset | (((unsigned long) space) << 32UL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range

static inline unsigned long __untagged_addr(unsigned long start)
{
	if (adi_capable()) {
		long addr = start;

		/* If userspace has passed a versioned address, kernel
		 * will not find it in the VMAs since it does not store
		 * the version tags in the list of VMAs.  Storing version
		 * tags in list of VMAs is impractical since they can be
		 * changed any time from userspace without dropping into
		 * kernel.  Any address search in VMAs will be done with
		 * non-versioned addresses.  Ensure the ADI version bits
		 * are dropped here by sign extending the last bit before
		 * ADI bits.  IOMMU does not implement version tags.
		 */
		return (addr << (long)adi_nbits()) >> (long)adi_nbits();
	}

	return start;
}
#define untagged_addr(addr) \
	((__typeof__(addr))(__untagged_addr((unsigned long)(addr))))
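
/* Sign-extension example, assuming 4-bit version tags (an illustrative
 * value for adi_nbits()): for a tagged address such as
 * 0x5000000000001000, shifting left then right by 4 as a signed long
 * replicates bit 59 into bits 63:60, leaving the canonical untagged
 * address that VMA lookups expect.
 */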

static inline bool pte_access_permitted(pte_t pte, bool write)
{
	u64 prot;

	if (tlb_type == hypervisor) {
		prot = _PAGE_PRESENT_4V | _PAGE_P_4V;
		if (write)
			prot |= _PAGE_WRITE_4V;
	} else {
		prot = _PAGE_PRESENT_4U | _PAGE_P_4U;
		if (write)
			prot |= _PAGE_WRITE_4U;
	}

	return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot;
}
#define pte_access_permitted pte_access_permitted

/* We provide our own get_unmapped_area to cope with VA holes and
 * SHM area cache aliasing for userland.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 * the largest alignment possible such that larger PTEs can be used.
 */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
				   unsigned long, unsigned long,
				   unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA

void sun4v_register_fault_status(void);
void sun4v_ktsb_register(void);
void __init cheetah_ecache_flush_init(void);
void sun4v_patch_tlb_handlers(void);

extern unsigned long cmdline_memory_size;

asmlinkage void do_sparc64_fault(struct pt_regs *regs);

#define pmd_pgtable(PMD)	((pte_t *)pmd_page_vaddr(PMD))

#ifdef CONFIG_HUGETLB_PAGE

#define pud_leaf_size pud_leaf_size
extern unsigned long pud_leaf_size(pud_t pud);

#define pmd_leaf_size pmd_leaf_size
extern unsigned long pmd_leaf_size(pmd_t pmd);

#define pte_leaf_size pte_leaf_size
extern unsigned long pte_leaf_size(pte_t pte);

#endif /* CONFIG_HUGETLB_PAGE */

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_PGTABLE_H) */