/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR PAGE_OFFSET
#define KERN_VIRT_SIZE (UL(-1))
#else

#define ADDRESS_SPACE_END (UL(-1))

#ifdef CONFIG_64BIT
/* Leave 2GB for kernel and BPF at the end of the address space */
#define KERNEL_LINK_ADDR (ADDRESS_SPACE_END - SZ_2G + 1)
#else
#define KERNEL_LINK_ADDR PAGE_OFFSET
#endif

/* Number of entries in the page global directory */
#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))

/*
 * Half of the kernel address space (1/4 of the entries of the page global
 * directory) is for the direct mapping.
 */
#define KERN_VIRT_SIZE ((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)

#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
#define VMALLOC_END PAGE_OFFSET
#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
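
/*
 * Worked example (a rough sketch, assuming Sv39 where PGDIR_SIZE is 1 GiB
 * and PTRS_PER_PGD is 512): the kernel half of the address space spans
 * 256 GiB, KERN_VIRT_SIZE (the direct mapping) is 128 GiB, and VMALLOC_SIZE
 * is 64 GiB, ending at PAGE_OFFSET.
 */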

#define BPF_JIT_REGION_SIZE (SZ_128M)
#ifdef CONFIG_64BIT
#define BPF_JIT_REGION_START (BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END (MODULES_END)
#else
#define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END (VMALLOC_END)
#endif

/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
/* This is used to define the end of the KASAN shadow region */
#define MODULES_LOWEST_VADDR (KERNEL_LINK_ADDR - SZ_2G)
#define MODULES_VADDR (PFN_ALIGN((unsigned long)&_end) - SZ_2G)
#define MODULES_END (PFN_ALIGN((unsigned long)&_start))
#endif
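
/*
 * Rationale sketch for the layout above: keeping modules (and the BPF JIT
 * region) in the 2 GiB window just below the kernel image keeps them within
 * the +/-2 GiB reach of RISC-V PC-relative code sequences (auipc + jalr),
 * so calls between kernel and module text normally need no long-jump
 * trampolines.
 */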

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VA_BITS_SV32 32
#ifdef CONFIG_64BIT
#define VA_BITS_SV39 39
#define VA_BITS_SV48 48
#define VA_BITS_SV57 57

#define VA_BITS (pgtable_l5_enabled ? \
                 VA_BITS_SV57 : (pgtable_l4_enabled ? VA_BITS_SV48 : VA_BITS_SV39))
#else
#define VA_BITS VA_BITS_SV32
#endif

#define VMEMMAP_SHIFT \
        (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END VMALLOC_START
#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE)
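
/*
 * Worked example: with Sv39 (VA_BITS = 39), PAGE_SHIFT = 12 and a typical
 * STRUCT_PAGE_MAX_SHIFT of 6 (64-byte struct page), VMEMMAP_SHIFT is
 * 39 - 12 - 1 + 6 = 32, i.e. a 4 GiB vmemmap window immediately below
 * VMALLOC_START. The exact size depends on the configured
 * STRUCT_PAGE_MAX_SHIFT and the runtime paging mode.
 */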

/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
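
/*
 * Because vmemmap is pre-biased by the PFN of phys_ram_base, &vmemmap[pfn]
 * is the struct page for physical frame 'pfn'; the struct page of the first
 * RAM page therefore sits exactly at VMEMMAP_START.
 */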

#define PCI_IO_SIZE SZ_16M
#define PCI_IO_END VMEMMAP_START
#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP PCI_IO_START
#ifdef CONFIG_64BIT
#define MAX_FDT_SIZE PMD_SIZE
#define FIX_FDT_SIZE (MAX_FDT_SIZE + SZ_2M)
#define FIXADDR_SIZE (PMD_SIZE + FIX_FDT_SIZE)
#else
#define MAX_FDT_SIZE PGDIR_SIZE
#define FIX_FDT_SIZE MAX_FDT_SIZE
#define FIXADDR_SIZE (PGDIR_SIZE + FIX_FDT_SIZE)
#endif
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)

#endif

#ifdef CONFIG_XIP_KERNEL
#define XIP_OFFSET SZ_32M
#define XIP_OFFSET_MASK (SZ_32M - 1)
#else
#define XIP_OFFSET 0
#endif

#ifndef __ASSEMBLY__

#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>
#include <asm/compat.h>

#define __page_val_to_pfn(_val) (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
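
/*
 * The PFN field of a RISC-V PTE does not start at bit 0: the low bits hold
 * the V/R/W/X/U/G/A/D and software flags, and the PPN begins at
 * _PAGE_PFN_SHIFT (bit 10 in the privileged spec), hence the mask-and-shift
 * above.
 */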

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>

#define VA_USER_SV39 (UL(1) << (VA_BITS_SV39 - 1))
#define VA_USER_SV48 (UL(1) << (VA_BITS_SV48 - 1))
#define VA_USER_SV57 (UL(1) << (VA_BITS_SV57 - 1))

#ifdef CONFIG_COMPAT
#define MMAP_VA_BITS_64 ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
#define MMAP_MIN_VA_BITS_64 (VA_BITS_SV39)
#define MMAP_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
#define MMAP_MIN_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
#else
#define MMAP_VA_BITS ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
#define MMAP_MIN_VA_BITS (VA_BITS_SV39)
#endif /* CONFIG_COMPAT */

#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#include <linux/page_table_check.h>

#ifdef CONFIG_XIP_KERNEL
#define XIP_FIXUP(addr) ({ \
        uintptr_t __a = (uintptr_t)(addr); \
        (__a >= CONFIG_XIP_PHYS_ADDR && \
         __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ? \
                __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET : \
                __a; \
        })
#else
#define XIP_FIXUP(addr) (addr)
#endif /* CONFIG_XIP_KERNEL */
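
/*
 * In rough terms: for an XIP kernel, an address that falls inside the
 * 2 * XIP_OFFSET (64 MiB) flash window starting at CONFIG_XIP_PHYS_ADDR is
 * rebased onto the writable copy of the image data that is placed in RAM at
 * CONFIG_PHYS_RAM_BASE during boot; any other address passes through
 * unchanged.
 */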

struct pt_alloc_ops {
        pte_t *(*get_pte_virt)(phys_addr_t pa);
        phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
        pmd_t *(*get_pmd_virt)(phys_addr_t pa);
        phys_addr_t (*alloc_pmd)(uintptr_t va);
        pud_t *(*get_pud_virt)(phys_addr_t pa);
        phys_addr_t (*alloc_pud)(uintptr_t va);
        p4d_t *(*get_p4d_virt)(phys_addr_t pa);
        phys_addr_t (*alloc_p4d)(uintptr_t va);
#endif
};

extern struct pt_alloc_ops pt_ops __initdata;

#ifdef CONFIG_MMU
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)

/* Page protection bits */
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE __pgprot(_PAGE_PROT_NONE | _PAGE_READ)
#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | \
                                 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY PAGE_READ
#define PAGE_COPY_EXEC PAGE_READ_EXEC
#define PAGE_SHARED PAGE_WRITE
#define PAGE_SHARED_EXEC PAGE_WRITE_EXEC

#define _PAGE_KERNEL (_PAGE_READ \
                      | _PAGE_WRITE \
                      | _PAGE_PRESENT \
                      | _PAGE_ACCESSED \
                      | _PAGE_DIRTY \
                      | _PAGE_GLOBAL)

#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC __pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
                                       | _PAGE_EXEC)

#define PAGE_TABLE __pgprot(_PAGE_TABLE)

#define _PAGE_IOREMAP ((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
#define PAGE_KERNEL_IO __pgprot(_PAGE_IOREMAP)
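
/*
 * ioremap()-style mappings clear the default memory-type bits
 * (_PAGE_MTMASK) and mark the range as I/O (non-cacheable). How these
 * attribute bits are laid out is platform dependent: the Svpbmt PBMT field
 * where supported, or the vendor equivalent patched in via ALT_THEAD_PMA on
 * T-Head cores.
 */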

extern pgd_t swapper_pg_dir[];
extern pgd_t trampoline_pg_dir[];
extern pgd_t early_pg_dir[];

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_present(pmd_t pmd)
{
        /*
         * Checking for _PAGE_LEAF is needed too because:
         * when splitting a THP, split_huge_page() will temporarily clear
         * the present bit; in this situation, pmd_present() and
         * pmd_trans_huge() still need to return true.
         */
        return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
}
#else
static inline int pmd_present(pmd_t pmd)
{
        return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
#endif

static inline int pmd_none(pmd_t pmd)
{
        return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
        return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
}

#define pmd_leaf pmd_leaf
static inline int pmd_leaf(pmd_t pmd)
{
        return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        *pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
        unsigned long prot_val = pgprot_val(prot);

        ALT_THEAD_PMA(prot_val);

        return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
        return __page_val_to_pfn(pgd_val(pgd));
}

static inline struct page *pmd_page(pmd_t pmd)
{
        return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
        return __pte(pmd_val(pmd));
}

static inline pte_t pud_pte(pud_t pud)
{
        return __pte(pud_val(pud));
}

#ifdef CONFIG_RISCV_ISA_SVNAPOT

static __always_inline bool has_svnapot(void)
{
        return riscv_has_extension_likely(RISCV_ISA_EXT_SVNAPOT);
}

static inline unsigned long pte_napot(pte_t pte)
{
        return pte_val(pte) & _PAGE_NAPOT;
}

static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
{
        int pos = order - 1 + _PAGE_PFN_SHIFT;
        unsigned long napot_bit = BIT(pos);
        unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);

        return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
}
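
/*
 * Svnapot encoding example: for order 4 (a 64 KiB naturally aligned range
 * built from 4 KiB base pages), pos = _PAGE_PFN_SHIFT + 3, so the four low
 * PPN bits are cleared and bit 'pos' is set (PPN[3:0] = 0b1000) along with
 * _PAGE_NAPOT. pte_pfn() below undoes this: "res & (res - 1)" clears that
 * lowest set marker bit, recovering the aligned base PFN.
 */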

#else

static __always_inline bool has_svnapot(void) { return false; }

static inline unsigned long pte_napot(pte_t pte)
{
        return 0;
}

#endif /* CONFIG_RISCV_ISA_SVNAPOT */

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
        unsigned long res = __page_val_to_pfn(pte_val(pte));

        if (has_svnapot() && pte_napot(pte))
                res = res & (res - 1UL);

        return res;
}

#define pte_page(x) pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
        unsigned long prot_val = pgprot_val(prot);

        ALT_THEAD_PMA(prot_val);

        return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)

static inline int pte_present(pte_t pte)
{
        return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
        return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
        return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
        return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_user(pte_t pte)
{
        return pte_val(pte) & _PAGE_USER;
}

static inline int pte_huge(pte_t pte)
{
        return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
}

static inline int pte_dirty(pte_t pte)
{
        return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
        return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
        return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite_novma(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte;
}

#define pte_leaf_size(pte) (pte_napot(pte) ? \
                            napot_cont_size(napot_cont_order(pte)) : \
                            PAGE_SIZE)

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
        return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
        return pte_protnone(pmd_pte(pmd));
}
#endif

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        unsigned long newprot_val = pgprot_val(newprot);

        ALT_THEAD_PMA(newprot_val);

        return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
}

#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))


/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
                struct vm_area_struct *vma, unsigned long address,
                pte_t *ptep, unsigned int nr)
{
        /*
         * The kernel assumes that TLBs don't cache invalid entries, but
         * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
         * cache flush; it is necessary even after writing invalid entries.
         * Relying on flush_tlb_fix_spurious_fault would suffice, but
         * the extra traps reduce performance. So, eagerly SFENCE.VMA.
         */
        while (nr--)
                local_flush_tlb_page(address + nr * PAGE_SIZE);
}
#define update_mmu_cache(vma, addr, ptep) \
        update_mmu_cache_range(NULL, vma, addr, ptep, 1)

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmdp)
{
        pte_t *ptep = (pte_t *)pmdp;

        update_mmu_cache(vma, address, ptep);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
        return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified. Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        *ptep = pteval;
}

void flush_icache_pte(pte_t pte);

static inline void __set_pte_at(pte_t *ptep, pte_t pteval)
{
        if (pte_present(pteval) && pte_exec(pteval))
                flush_icache_pte(pteval);

        set_pte(ptep, pteval);
}

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
                pte_t *ptep, pte_t pteval, unsigned int nr)
{
        page_table_check_ptes_set(mm, ptep, pteval, nr);

        for (;;) {
                __set_pte_at(ptep, pteval);
                if (--nr == 0)
                        break;
                ptep++;
                pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
        }
}
#define set_ptes set_ptes

static inline void pte_clear(struct mm_struct *mm,
                unsigned long addr, pte_t *ptep)
{
        __set_pte_at(ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
                unsigned long address, pte_t *ptep,
                pte_t entry, int dirty)
{
        if (!pte_same(*ptep, entry))
                __set_pte_at(ptep, entry);
        /*
         * update_mmu_cache will unconditionally execute, handling both
         * the case that the PTE changed and the spurious fault case.
         */
        return true;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                unsigned long address, pte_t *ptep)
{
        pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));

        page_table_check_pte_clear(mm, pte);

        return pte;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                unsigned long address,
                pte_t *ptep)
{
        if (!pte_young(*ptep))
                return 0;
        return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
                unsigned long address, pte_t *ptep)
{
        atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
                unsigned long address, pte_t *ptep)
{
        /*
         * This comment is borrowed from x86, but applies equally to RISC-V:
         *
         * Clearing the accessed bit without a TLB flush
         * doesn't cause data corruption. [ It could cause incorrect
         * page aging and the (mistaken) reclaim of hot pages, but the
         * chance of that should be relatively low. ]
         *
         * So as a performance optimization don't flush the TLB when
         * clearing the accessed bit, it will eventually be flushed by
         * a context switch or a VM operation anyway. [ In the rare
         * event of it not getting flushed for a long time the delay
         * shouldn't really matter because there's no real memory
         * pressure for swapout to react to. ]
         */
        return ptep_test_and_clear_young(vma, address, ptep);
}

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

        prot &= ~_PAGE_MTMASK;
        prot |= _PAGE_IO;

        return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
        unsigned long prot = pgprot_val(_prot);

        prot &= ~_PAGE_MTMASK;
        prot |= _PAGE_NOCACHE;

        return __pgprot(prot);
}

/*
 * THP functions
 */
static inline pmd_t pte_pmd(pte_t pte)
{
        return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
        return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
}

#define __pmd_to_phys(pmd) (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
}

#define __pud_to_phys(pud) (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)

static inline unsigned long pud_pfn(pud_t pud)
{
        return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
        return pte_write(pmd_pte(pmd));
}

static inline int pmd_dirty(pmd_t pmd)
{
        return pte_dirty(pmd_pte(pmd));
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
        return pte_young(pmd_pte(pmd));
}

static inline int pmd_user(pmd_t pmd)
{
        return pte_user(pmd_pte(pmd));
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        return pte_pmd(pte_mkold(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
        return pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
        return pte_pmd(pte_mkclean(pmd_pte(pmd)));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
        page_table_check_pmd_set(mm, pmdp, pmd);
        return __set_pte_at((pte_t *)pmdp, pmd_pte(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
                pud_t *pudp, pud_t pud)
{
        page_table_check_pud_set(mm, pudp, pud);
        return __set_pte_at((pte_t *)pudp, pud_pte(pud));
}

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
        return pte_present(pte) && pte_user(pte);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
        return pmd_leaf(pmd) && pmd_user(pmd);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
        return pud_leaf(pud) && pud_user(pud);
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
        return pmd_leaf(pmd);
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmdp,
                pmd_t entry, int dirty)
{
        return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmdp)
{
        return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                unsigned long address, pmd_t *pmdp)
{
        pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));

        page_table_check_pmd_clear(mm, pmd);

        return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                unsigned long address, pmd_t *pmdp)
{
        ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}

#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
        page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
        return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
}

#define pmdp_collapse_flush pmdp_collapse_flush
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmdp);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTE:
 *      bit 0:              _PAGE_PRESENT (zero)
 *      bits 1 to 3:        _PAGE_LEAF (zero)
 *      bit 5:              _PAGE_PROT_NONE (zero)
 *      bit 6:              exclusive marker
 *      bits 7 to 11:       swap type
 *      bits 12 to XLEN-1:  swap offset
 */
#define __SWP_TYPE_SHIFT 7
#define __SWP_TYPE_BITS 5
#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK() \
        BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
        { (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
          ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
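
/*
 * Worked example: __swp_entry(3, 0x10) packs the type into bits 7..11 and
 * the offset from bit 12 upward, giving (3 << 7) | (0x10 << 12) = 0x10180.
 * _PAGE_PRESENT (bit 0) stays clear, so the resulting PTE is
 * !pte_none() && !pte_present(), as required for a swap PTE.
 */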

static inline int pte_swp_exclusive(pte_t pte)
{
        return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(swp) __pmd((swp).val)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START (-(BIT(VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START FIXADDR_START
#endif

/*
 * Task size is:
 * - 0x9fc00000          (~2.5 GB) for RV32
 * - 0x4000000000        ( 256 GB) for RV64 using SV39 mmu
 * - 0x800000000000      ( 128 TB) for RV64 using SV48 mmu
 * - 0x100000000000000   (  64 PB) for RV64 using SV57 mmu
 *
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE since the "RISC-V
 * Instruction Set Manual Volume II: Privileged Architecture" states that
 * "load and store effective addresses, which are 64bits, must have bits
 * 63–48 all equal to bit 47, or else a page-fault exception will occur."
 * Similarly for SV57, bits 63–57 must be equal to bit 56.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2)
#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
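
/*
 * For example, under Sv39 (PGDIR_SIZE = 1 GiB, PTRS_PER_PGD = 512) this
 * gives TASK_SIZE_64 = 512 * 1 GiB / 2 = 256 GiB = 0x4000000000, matching
 * the table above. TASK_SIZE_MIN uses the three-level (Sv39) PGDIR size, so
 * a 64-bit task always has at least the Sv39 range regardless of the
 * runtime paging mode.
 */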

#ifdef CONFIG_COMPAT
#define TASK_SIZE_32 (_AC(0x80000000, UL))
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
                   TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE TASK_SIZE_64
#endif

#else
#define TASK_SIZE FIXADDR_START
#define TASK_SIZE_MIN TASK_SIZE
#endif

#else /* CONFIG_MMU */

#define PAGE_SHARED __pgprot(0)
#define PAGE_KERNEL __pgprot(0)
#define swapper_pg_dir NULL
#define TASK_SIZE 0xffffffffUL
#define VMALLOC_START 0
#define VMALLOC_END TASK_SIZE

#endif /* !CONFIG_MMU */

extern char _start[];
extern void *_dtb_early_va;
extern uintptr_t _dtb_early_pa;
#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
#define dtb_early_va (*(void **)XIP_FIXUP(&_dtb_early_va))
#define dtb_early_pa (*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
#else
#define dtb_early_va _dtb_early_va
#define dtb_early_pa _dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
extern u64 satp_mode;
extern bool pgtable_l4_enabled;

void paging_init(void);
void misc_mem_init(void);

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */