/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_64_H
#define _ASM_RISCV_PGTABLE_64_H

#include <linux/bits.h>
#include <linux/const.h>
#include <asm/errata_list.h>

extern bool pgtable_l4_enabled;
extern bool pgtable_l5_enabled;

#define PGDIR_SHIFT_L3	30
#define PGDIR_SHIFT_L4	39
#define PGDIR_SHIFT_L5	48
#define PGDIR_SIZE_L3	(_AC(1, UL) << PGDIR_SHIFT_L3)

#define PGDIR_SHIFT	(pgtable_l5_enabled ? PGDIR_SHIFT_L5 : \
		(pgtable_l4_enabled ? PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3))
/* Size of region mapped by a page global directory */
#define PGDIR_SIZE	(_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
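
/*
 * Illustration only (not part of the original header): one PGD entry
 * covers 1GiB under Sv39 (shift 30), 512GiB under Sv48 (shift 39) and
 * 256TiB under Sv57 (shift 48). For example:
 *
 *	PGDIR_SIZE_L3 == 0x40000000UL	-> 1GiB per PGD entry
 *	addr & PGDIR_MASK		-> start of the region holding addr
 */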

/* p4d is folded into pgd in case of 4-level page table */
#define P4D_SHIFT_L3	30
#define P4D_SHIFT_L4	39
#define P4D_SHIFT_L5	39
#define P4D_SHIFT	(pgtable_l5_enabled ? P4D_SHIFT_L5 : \
		(pgtable_l4_enabled ? P4D_SHIFT_L4 : P4D_SHIFT_L3))
#define P4D_SIZE	(_AC(1, UL) << P4D_SHIFT)
#define P4D_MASK	(~(P4D_SIZE - 1))

/* pud is folded into pgd in case of 3-level page table */
#define PUD_SHIFT	30
#define PUD_SIZE	(_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE - 1))

#define PMD_SHIFT	21
/* Size of region mapped by a page middle directory */
#define PMD_SIZE	(_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
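
/*
 * Illustration only: PMD_SHIFT of 21 means each PMD entry maps a 2MiB
 * (1 << 21 byte) region, so for a given virtual address:
 *
 *	PMD_SIZE	  == 0x200000UL	   (2MiB)
 *	addr & PMD_MASK	  -> 2MiB-aligned base of the mapping
 */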

/* Page 4th Directory entry */
typedef struct {
	unsigned long p4d;
} p4d_t;

#define p4d_val(x)	((x).p4d)
#define __p4d(x)	((p4d_t) { (x) })
#define PTRS_PER_P4D	(PAGE_SIZE / sizeof(p4d_t))

/* Page Upper Directory entry */
typedef struct {
	unsigned long pud;
} pud_t;

#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })
#define PTRS_PER_PUD	(PAGE_SIZE / sizeof(pud_t))

/* Page Middle Directory entry */
typedef struct {
	unsigned long pmd;
} pmd_t;

#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

#define PTRS_PER_PMD	(PAGE_SIZE / sizeof(pmd_t))

/*
 * rv64 PTE format:
 * | 63 | 62 61 | 60 54 | 53  10 | 9             8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
 *   N      MT     RSV    PFN      reserved for SW   D   A   G   U   X   W   R   V
 */
#define _PAGE_PFN_MASK	GENMASK(53, 10)
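
/*
 * Sketch (illustration only, assuming _PAGE_PFN_SHIFT from
 * <asm/pgtable-bits.h> is the low bit of the PFN field, i.e. 10):
 * the physical frame number of a leaf PTE can be recovered as
 *
 *	pfn = (pte_val(pte) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT;
 *
 * which is effectively what the __page_val_to_pfn() helper used by the
 * accessors below does.
 */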

/*
 * [63] Svnapot definitions:
 * 0 Svnapot disabled
 * 1 Svnapot enabled
 */
#define _PAGE_NAPOT_SHIFT	63
#define _PAGE_NAPOT		BIT(_PAGE_NAPOT_SHIFT)
/*
 * Only 64KB (order 4) napot ptes supported.
 */
#define NAPOT_CONT_ORDER_BASE	4
enum napot_cont_order {
	NAPOT_CONT64KB_ORDER = NAPOT_CONT_ORDER_BASE,
	NAPOT_ORDER_MAX,
};

#define for_each_napot_order(order)						\
	for (order = NAPOT_CONT_ORDER_BASE; order < NAPOT_ORDER_MAX; order++)
#define for_each_napot_order_rev(order)						\
	for (order = NAPOT_ORDER_MAX - 1;					\
	     order >= NAPOT_CONT_ORDER_BASE; order--)
#define napot_cont_order(val)	(__builtin_ctzl((val.pte >> _PAGE_PFN_SHIFT) << 1))

#define napot_cont_shift(order)	((order) + PAGE_SHIFT)
#define napot_cont_size(order)	BIT(napot_cont_shift(order))
#define napot_cont_mask(order)	(~(napot_cont_size(order) - 1UL))
#define napot_pte_num(order)	BIT(order)
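
/*
 * Worked example (illustration only): for NAPOT_CONT64KB_ORDER (4) and
 * 4K base pages,
 *
 *	napot_cont_shift(4) == 16	-> napot_cont_size(4) == 64KiB
 *	napot_pte_num(4)    == 16	-> 16 contiguous PTEs per mapping
 *	addr & napot_cont_mask(4)	-> 64KiB-aligned base address
 */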

#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define HUGE_MAX_HSTATE		(2 + (NAPOT_ORDER_MAX - NAPOT_CONT_ORDER_BASE))
#else
#define HUGE_MAX_HSTATE		2
#endif

/*
 * [62:61] Svpbmt Memory Type definitions:
 *
 *  00 - PMA    Normal Cacheable, No change to implied PMA memory type
 *  01 - NC     Non-cacheable, idempotent, weakly-ordered Main Memory
 *  10 - IO     Non-cacheable, non-idempotent, strongly-ordered I/O memory
 *  11 - Rsvd   Reserved for future standard use
 */
#define _PAGE_NOCACHE_SVPBMT	(1UL << 61)
#define _PAGE_IO_SVPBMT		(1UL << 62)
#define _PAGE_MTMASK_SVPBMT	(_PAGE_NOCACHE_SVPBMT | _PAGE_IO_SVPBMT)
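
/*
 * Sketch (illustration only): a caller wanting a non-cacheable mapping
 * on Svpbmt hardware would clear the memory-type field and set NC:
 *
 *	prot = (prot & ~_PAGE_MTMASK_SVPBMT) | _PAGE_NOCACHE_SVPBMT;
 *
 * Generic code is expected to go through the runtime-selected
 * _PAGE_MTMASK/_PAGE_NOCACHE/_PAGE_IO macros further down rather than
 * these Svpbmt-specific constants.
 */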

/*
 * [63:59] T-Head Memory Type definitions:
 *
 * 00000 - NC   Weakly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
 * 01110 - PMA  Weakly-ordered, Cacheable, Bufferable, Shareable, Non-trustable
 * 10000 - IO   Strongly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
 */
#define _PAGE_PMA_THEAD		((1UL << 62) | (1UL << 61) | (1UL << 60))
#define _PAGE_NOCACHE_THEAD	0UL
#define _PAGE_IO_THEAD		(1UL << 63)
#define _PAGE_MTMASK_THEAD	(_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59))

static inline u64 riscv_page_mtmask(void)
{
	u64 val;

	ALT_SVPBMT(val, _PAGE_MTMASK);
	return val;
}

static inline u64 riscv_page_nocache(void)
{
	u64 val;

	ALT_SVPBMT(val, _PAGE_NOCACHE);
	return val;
}

static inline u64 riscv_page_io(void)
{
	u64 val;

	ALT_SVPBMT(val, _PAGE_IO);
	return val;
}

#define _PAGE_NOCACHE		riscv_page_nocache()
#define _PAGE_IO		riscv_page_io()
#define _PAGE_MTMASK		riscv_page_mtmask()
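
/*
 * Illustration only: these macros resolve at runtime through the
 * alternatives framework, so generic code stays agnostic of which
 * memory-type encoding (Svpbmt, T-Head, or none) the hardware uses.
 * A pgprot_noncached()-style helper would roughly do
 *
 *	prot &= ~_PAGE_MTMASK;
 *	prot |= _PAGE_IO;
 *
 * and pick up the appropriate encoding, or zero bits when neither
 * extension is available.
 */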

/* Set of bits to preserve across pte_modify() */
#define _PAGE_CHG_MASK	(~(unsigned long)(_PAGE_PRESENT | _PAGE_READ |	\
					  _PAGE_WRITE | _PAGE_EXEC |	\
					  _PAGE_USER | _PAGE_GLOBAL |	\
					  _PAGE_MTMASK))
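
/*
 * Sketch (illustration only): a pte_modify()-style update keeps the
 * bits selected by _PAGE_CHG_MASK (PFN, dirty/accessed, software bits)
 * and takes permissions and memory type from the new protection,
 * roughly:
 *
 *	new = __pte((pte_val(old) & _PAGE_CHG_MASK) | pgprot_val(newprot));
 */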

static inline int pud_present(pud_t pud)
{
	return (pud_val(pud) & _PAGE_PRESENT);
}

static inline int pud_none(pud_t pud)
{
	return (pud_val(pud) == 0);
}

static inline int pud_bad(pud_t pud)
{
	return !pud_present(pud);
}

#define pud_leaf	pud_leaf
static inline int pud_leaf(pud_t pud)
{
	return pud_present(pud) && (pud_val(pud) & _PAGE_LEAF);
}

static inline int pud_user(pud_t pud)
{
	return pud_val(pud) & _PAGE_USER;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline pud_t pfn_pud(unsigned long pfn, pgprot_t prot)
{
	return __pud((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline unsigned long _pud_pfn(pud_t pud)
{
	return __page_val_to_pfn(pud_val(pud));
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pfn_to_virt(__page_val_to_pfn(pud_val(pud)));
}

static inline struct page *pud_page(pud_t pud)
{
	return pfn_to_page(__page_val_to_pfn(pud_val(pud)));
}

#define mm_p4d_folded	mm_p4d_folded
static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	if (pgtable_l5_enabled)
		return false;

	return true;
}

#define mm_pud_folded	mm_pud_folded
static inline bool mm_pud_folded(struct mm_struct *mm)
{
	if (pgtable_l4_enabled)
		return false;

	return true;
}

#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);

	ALT_THEAD_PMA(prot_val);

	return __pmd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}

static inline unsigned long _pmd_pfn(pmd_t pmd)
{
	return __page_val_to_pfn(pmd_val(pmd));
}

#define mk_pmd(page, prot)	pfn_pmd(page_to_pfn(page), prot)

#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))

#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))

#define p4d_ERROR(e) \
	pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	if (pgtable_l4_enabled)
		*p4dp = p4d;
	else
		set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) });
}

static inline int p4d_none(p4d_t p4d)
{
	if (pgtable_l4_enabled)
		return (p4d_val(p4d) == 0);

	return 0;
}

static inline int p4d_present(p4d_t p4d)
{
	if (pgtable_l4_enabled)
		return (p4d_val(p4d) & _PAGE_PRESENT);

	return 1;
}

static inline int p4d_bad(p4d_t p4d)
{
	if (pgtable_l4_enabled)
		return !p4d_present(p4d);

	return 0;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if (pgtable_l4_enabled)
		set_p4d(p4d, __p4d(0));
}

static inline p4d_t pfn_p4d(unsigned long pfn, pgprot_t prot)
{
	return __p4d((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline unsigned long _p4d_pfn(p4d_t p4d)
{
	return __page_val_to_pfn(p4d_val(p4d));
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	if (pgtable_l4_enabled)
		return (pud_t *)pfn_to_virt(__page_val_to_pfn(p4d_val(p4d)));

	return (pud_t *)pud_pgtable((pud_t) { p4d_val(p4d) });
}
#define p4d_page_vaddr(p4d)	((unsigned long)p4d_pgtable(p4d))

static inline struct page *p4d_page(p4d_t p4d)
{
	return pfn_to_page(__page_val_to_pfn(p4d_val(p4d)));
}

#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset pud_offset
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	if (pgtable_l4_enabled)
		return p4d_pgtable(*p4d) + pud_index(address);

	return (pud_t *)p4d;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	if (pgtable_l5_enabled)
		*pgdp = pgd;
	else
		set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) });
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgtable_l5_enabled)
		return (pgd_val(pgd) == 0);

	return 0;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgtable_l5_enabled)
		return (pgd_val(pgd) & _PAGE_PRESENT);

	return 1;
}

static inline int pgd_bad(pgd_t pgd)
{
	if (pgtable_l5_enabled)
		return !pgd_present(pgd);

	return 0;
}

static inline void pgd_clear(pgd_t *pgd)
{
	if (pgtable_l5_enabled)
		set_pgd(pgd, __pgd(0));
}

static inline p4d_t *pgd_pgtable(pgd_t pgd)
{
	if (pgtable_l5_enabled)
		return (p4d_t *)pfn_to_virt(__page_val_to_pfn(pgd_val(pgd)));

	return (p4d_t *)p4d_pgtable((p4d_t) { pgd_val(pgd) });
}
#define pgd_page_vaddr(pgd)	((unsigned long)pgd_pgtable(pgd))

static inline struct page *pgd_page(pgd_t pgd)
{
	return pfn_to_page(__page_val_to_pfn(pgd_val(pgd)));
}
#define pgd_page(pgd) pgd_page(pgd)

#define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))

#define p4d_offset p4d_offset
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (pgtable_l5_enabled)
		return pgd_pgtable(*pgd) + p4d_index(address);

	return (p4d_t *)pgd;
}
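
/*
 * Sketch (illustration only): together with the generic pgd_offset()
 * and pmd_offset() helpers defined elsewhere, a software page-table
 * walk composes roughly as
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_offset(pgd, addr);	// folds to pgd if !pgtable_l5_enabled
 *	pud_t *pud = pud_offset(p4d, addr);	// folds to p4d if !pgtable_l4_enabled
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *
 * where the folded cases simply return the incoming pointer cast to the
 * next level's type.
 */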

#endif /* _ASM_RISCV_PGTABLE_64_H */