/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_DEFS_H
#define _ASM_X86_PGTABLE_DEFS_H

#include <linux/const.h>
#include <linux/mem_encrypt.h>

#include <asm/page_types.h>

#define _PAGE_BIT_PRESENT 0 /* is present */
#define _PAGE_BIT_RW 1 /* writeable */
#define _PAGE_BIT_USER 2 /* userspace addressable */
#define _PAGE_BIT_PWT 3 /* page write through */
#define _PAGE_BIT_PCD 4 /* page cache disabled */
#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
#define _PAGE_BIT_PSE 7 /* 4 MB (or 2 MB) page */
#define _PAGE_BIT_PAT 7 /* on 4KB pages */
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
#define _PAGE_BIT_SOFTW1 9 /* available for programmer */
#define _PAGE_BIT_SOFTW2 10 /* " */
#define _PAGE_BIT_SOFTW3 11 /* " */
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
#define _PAGE_BIT_SOFTW4 58 /* available for programmer */
#define _PAGE_BIT_PKEY_BIT0 59 /* Protection Keys, bit 1/4 */
#define _PAGE_BIT_PKEY_BIT1 60 /* Protection Keys, bit 2/4 */
#define _PAGE_BIT_PKEY_BIT2 61 /* Protection Keys, bit 3/4 */
#define _PAGE_BIT_PKEY_BIT3 62 /* Protection Keys, bit 4/4 */
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */

#define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1
#define _PAGE_BIT_UFFD_WP _PAGE_BIT_SOFTW2 /* userfaultfd wrprotected */
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present() gives true */
#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
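/*
 * Illustrative sketch (not part of this header): with the encoding above,
 * a PROT_NONE entry is one that has _PAGE_PROTNONE set while
 * _PAGE_PRESENT is clear, which is roughly what a pte_protnone()-style
 * helper tests. The helper name here is hypothetical:
 *
 *	static inline bool example_pte_protnone(pteval_t val)
 *	{
 *		return (val & (_PAGE_PROTNONE | _PAGE_PRESENT))
 *			== _PAGE_PROTNONE;
 *	}
 *
 * PAGE_NONE further down sets ___A and ___G but not __PP, matching this
 * encoding.
 */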

#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_SOFTW1 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_SOFTW2 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
#define _PAGE_SOFTW3 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW3)
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
#define _PAGE_PKEY_BIT0 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT0)
#define _PAGE_PKEY_BIT1 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT1)
#define _PAGE_PKEY_BIT2 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT2)
#define _PAGE_PKEY_BIT3 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT3)
#else
#define _PAGE_PKEY_BIT0 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT1 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT2 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT3 (_AT(pteval_t, 0))
#endif

#define _PAGE_PKEY_MASK (_PAGE_PKEY_BIT0 | \
			 _PAGE_PKEY_BIT1 | \
			 _PAGE_PKEY_BIT2 | \
			 _PAGE_PKEY_BIT3)
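/*
 * Illustrative sketch (hypothetical helper, not part of this header): the
 * four pkey bits sit contiguously at bits 59-62, so the protection key of
 * an entry can be extracted by masking and shifting:
 *
 *	static inline u16 example_pte_pkey(pteval_t val)
 *	{
 *		return (val & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
 *	}
 *
 * With CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS disabled the mask is 0 and
 * this always yields key 0.
 */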

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_KNL_ERRATUM_MASK (_PAGE_DIRTY | _PAGE_ACCESSED)
#else
#define _PAGE_KNL_ERRATUM_MASK 0
#endif

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0))
#endif

/*
 * Tracking the soft-dirty bit when a page goes out to swap is tricky.
 * We need a bit which can be stored in the pte _and_ does not conflict
 * with the swap entry format. On x86 bits 1-4 are *not* involved in
 * swap entry computation, but bit 7 is used for THP migration, so we
 * borrow bit 1 (_PAGE_RW) for soft-dirty tracking.
 *
 * Note that this bit must be treated as the swap soft-dirty mark if and
 * only if the PTE/PMD has the present bit clear!
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY _PAGE_RW
#else
#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0))
#endif
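/*
 * Illustrative sketch (hypothetical helper): because _PAGE_SWP_SOFT_DIRTY
 * aliases _PAGE_RW, it may only be interpreted as soft-dirty when the
 * entry is a swap entry, i.e. when _PAGE_PRESENT is clear:
 *
 *	static inline bool example_swp_soft_dirty(pteval_t val)
 *	{
 *		return !(val & _PAGE_PRESENT) &&
 *		       (val & _PAGE_SWP_SOFT_DIRTY);
 *	}
 *
 * On a present pte the very same bit simply means "writable".
 */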

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define _PAGE_UFFD_WP (_AT(pteval_t, 1) << _PAGE_BIT_UFFD_WP)
#define _PAGE_SWP_UFFD_WP _PAGE_USER
#else
#define _PAGE_UFFD_WP (_AT(pteval_t, 0))
#define _PAGE_SWP_UFFD_WP (_AT(pteval_t, 0))
#endif

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#define _PAGE_DEVMAP (_AT(u64, 1) << _PAGE_BIT_DEVMAP)
#define _PAGE_SOFTW4 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW4)
#else
#define _PAGE_NX (_AT(pteval_t, 0))
#define _PAGE_DEVMAP (_AT(pteval_t, 0))
#define _PAGE_SOFTW4 (_AT(pteval_t, 0))
#endif

#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

/*
 * Set of bits not changed in pte_modify. The pte's
 * protection key is treated like _PAGE_RW, for
 * instance, and is *not* included in this mask since
 * pte_modify() does modify it.
 */
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
			_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
			_PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC | \
			_PAGE_UFFD_WP)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
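/*
 * Illustrative sketch (not the kernel's actual implementation): a
 * pte_modify()-style operation keeps the bits in _PAGE_CHG_MASK and takes
 * everything else from the new protection value:
 *
 *	static inline pteval_t example_modify(pteval_t old, pgprot_t newprot)
 *	{
 *		return (old & _PAGE_CHG_MASK) |
 *		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
 *	}
 *
 * Since the pkey bits are outside the mask, changing the protection also
 * changes the protection key, as the comment above notes.
 */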

/*
 * The cache modes defined here are used to translate between pure SW usage
 * and the HW defined cache mode bits and/or PAT entries.
 *
 * The resulting bits for PWT, PCD and PAT should be chosen in a way
 * to have the WB mode at index 0 (all bits clear). This is the default
 * right now and likely would break too much if changed.
 */
#ifndef __ASSEMBLY__
enum page_cache_mode {
	_PAGE_CACHE_MODE_WB = 0,
	_PAGE_CACHE_MODE_WC = 1,
	_PAGE_CACHE_MODE_UC_MINUS = 2,
	_PAGE_CACHE_MODE_UC = 3,
	_PAGE_CACHE_MODE_WT = 4,
	_PAGE_CACHE_MODE_WP = 5,

	_PAGE_CACHE_MODE_NUM = 8
};
#endif

#define _PAGE_ENC (_AT(pteval_t, sme_me_mask))

#define _PAGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
#define _PAGE_LARGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT_LARGE)

#define _PAGE_NOCACHE (cachemode2protval(_PAGE_CACHE_MODE_UC))
#define _PAGE_CACHE_WP (cachemode2protval(_PAGE_CACHE_MODE_WP))

#define __PP _PAGE_PRESENT
#define __RW _PAGE_RW
#define _USR _PAGE_USER
#define ___A _PAGE_ACCESSED
#define ___D _PAGE_DIRTY
#define ___G _PAGE_GLOBAL
#define __NX _PAGE_NX

#define _ENC _PAGE_ENC
#define __WP _PAGE_CACHE_WP
#define __NC _PAGE_NOCACHE
#define _PSE _PAGE_PSE

#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )
#define __pg(x) __pgprot(x)

#define PAGE_NONE __pg( 0| 0| 0|___A| 0| 0| 0|___G)
#define PAGE_SHARED __pg(__PP|__RW|_USR|___A|__NX| 0| 0| 0)
#define PAGE_SHARED_EXEC __pg(__PP|__RW|_USR|___A| 0| 0| 0| 0)
#define PAGE_COPY_NOEXEC __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0)
#define PAGE_COPY_EXEC __pg(__PP| 0|_USR|___A| 0| 0| 0| 0)
#define PAGE_COPY __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0)
#define PAGE_READONLY __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0)
#define PAGE_READONLY_EXEC __pg(__PP| 0|_USR|___A| 0| 0| 0| 0)

#define __PAGE_KERNEL (__PP|__RW| 0|___A|__NX|___D| 0|___G)
#define __PAGE_KERNEL_EXEC (__PP|__RW| 0|___A| 0|___D| 0|___G)
#define _KERNPG_TABLE_NOENC (__PP|__RW| 0|___A| 0|___D| 0| 0)
#define _KERNPG_TABLE (__PP|__RW| 0|___A| 0|___D| 0| 0| _ENC)
#define _PAGE_TABLE_NOENC (__PP|__RW|_USR|___A| 0|___D| 0| 0)
#define _PAGE_TABLE (__PP|__RW|_USR|___A| 0|___D| 0| 0| _ENC)
#define __PAGE_KERNEL_RO (__PP| 0| 0|___A|__NX|___D| 0|___G)
#define __PAGE_KERNEL_ROX (__PP| 0| 0|___A| 0|___D| 0|___G)
#define __PAGE_KERNEL_NOCACHE (__PP|__RW| 0|___A|__NX|___D| 0|___G| __NC)
#define __PAGE_KERNEL_VVAR (__PP| 0|_USR|___A|__NX|___D| 0|___G)
#define __PAGE_KERNEL_LARGE (__PP|__RW| 0|___A|__NX|___D|_PSE|___G)
#define __PAGE_KERNEL_LARGE_EXEC (__PP|__RW| 0|___A| 0|___D|_PSE|___G)
#define __PAGE_KERNEL_WP (__PP|__RW| 0|___A|__NX|___D| 0|___G| __WP)


#define __PAGE_KERNEL_IO __PAGE_KERNEL
#define __PAGE_KERNEL_IO_NOCACHE __PAGE_KERNEL_NOCACHE


#ifndef __ASSEMBLY__

#define __PAGE_KERNEL_ENC (__PAGE_KERNEL | _ENC)
#define __PAGE_KERNEL_ENC_WP (__PAGE_KERNEL_WP | _ENC)
#define __PAGE_KERNEL_NOENC (__PAGE_KERNEL | 0)
#define __PAGE_KERNEL_NOENC_WP (__PAGE_KERNEL_WP | 0)

#define __pgprot_mask(x) __pgprot((x) & __default_kernel_pte_mask)

#define PAGE_KERNEL __pgprot_mask(__PAGE_KERNEL | _ENC)
#define PAGE_KERNEL_NOENC __pgprot_mask(__PAGE_KERNEL | 0)
#define PAGE_KERNEL_RO __pgprot_mask(__PAGE_KERNEL_RO | _ENC)
#define PAGE_KERNEL_EXEC __pgprot_mask(__PAGE_KERNEL_EXEC | _ENC)
#define PAGE_KERNEL_EXEC_NOENC __pgprot_mask(__PAGE_KERNEL_EXEC | 0)
#define PAGE_KERNEL_ROX __pgprot_mask(__PAGE_KERNEL_ROX | _ENC)
#define PAGE_KERNEL_NOCACHE __pgprot_mask(__PAGE_KERNEL_NOCACHE | _ENC)
#define PAGE_KERNEL_LARGE __pgprot_mask(__PAGE_KERNEL_LARGE | _ENC)
#define PAGE_KERNEL_LARGE_EXEC __pgprot_mask(__PAGE_KERNEL_LARGE_EXEC | _ENC)
#define PAGE_KERNEL_VVAR __pgprot_mask(__PAGE_KERNEL_VVAR | _ENC)

#define PAGE_KERNEL_IO __pgprot_mask(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE __pgprot_mask(__PAGE_KERNEL_IO_NOCACHE)
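/*
 * Hedged note: __pgprot_mask() filters each PAGE_KERNEL* value through
 * __default_kernel_pte_mask (declared further down), which lets early boot
 * code drop bits such as _PAGE_NX or _PAGE_GLOBAL from kernel mappings on
 * CPUs that do not support them. A minimal sketch of what a user of
 * PAGE_KERNEL ends up with:
 *
 *	pgprot_t prot = PAGE_KERNEL;
 *	// expands to:
 *	// __pgprot((__PAGE_KERNEL | _ENC) & __default_kernel_pte_mask)
 */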

#endif /* __ASSEMBLY__ */

/*
 * early identity mapping pte attrib macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
#else
#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
#endif

#ifdef CONFIG_X86_32
# include <asm/pgtable_32_types.h>
#else
# include <asm/pgtable_64_types.h>
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>

/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)

/*
 * Extracts the flags from a (pte|pmd|pud|pgd)val_t
 * This includes the protection key value.
 */
#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
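/*
 * Illustrative sketch (hypothetical helpers): splitting an entry value
 * into PFN and flags with the two masks above:
 *
 *	static inline unsigned long example_pte_pfn(pteval_t val)
 *	{
 *		return (val & PTE_PFN_MASK) >> PAGE_SHIFT;
 *	}
 *
 *	static inline pteval_t example_pte_flags(pteval_t val)
 *	{
 *		return val & PTE_FLAGS_MASK;
 *	}
 *
 * pte_flags() later in this file does exactly the latter for a pte_t.
 */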

typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

typedef struct { pgdval_t pgd; } pgd_t;

static inline pgprot_t pgprot_nx(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) | _PAGE_NX);
}
#define pgprot_nx pgprot_nx

#ifdef CONFIG_X86_PAE

/*
 * PHYSICAL_PAGE_MASK might be non-constant when SME is compiled in, so we can't
 * use it here.
 */

#define PGD_PAE_PAGE_MASK ((signed long)PAGE_MASK)
#define PGD_PAE_PHYS_MASK (((1ULL << __PHYSICAL_MASK_SHIFT)-1) & PGD_PAE_PAGE_MASK)

/*
 * PAE allows Base Address, P, PWT, PCD and AVL bits to be set in PGD entries.
 * All other bits are Reserved MBZ.
 */
#define PGD_ALLOWED_BITS (PGD_PAE_PHYS_MASK | _PAGE_PRESENT | \
			  _PAGE_PWT | _PAGE_PCD | \
			  _PAGE_SOFTW1 | _PAGE_SOFTW2 | _PAGE_SOFTW3)

#else
/* No need to mask any bits for !PAE */
#define PGD_ALLOWED_BITS (~0ULL)
#endif

static inline pgd_t native_make_pgd(pgdval_t val)
{
	return (pgd_t) { val & PGD_ALLOWED_BITS };
}

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
	return pgd.pgd & PGD_ALLOWED_BITS;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}

#if CONFIG_PGTABLE_LEVELS > 4
typedef struct { p4dval_t p4d; } p4d_t;

static inline p4d_t native_make_p4d(pudval_t val)
{
	return (p4d_t) { val };
}

static inline p4dval_t native_p4d_val(p4d_t p4d)
{
	return p4d.p4d;
}
#else
#include <asm-generic/pgtable-nop4d.h>

static inline p4d_t native_make_p4d(pudval_t val)
{
	return (p4d_t) { .pgd = native_make_pgd((pgdval_t)val) };
}

static inline p4dval_t native_p4d_val(p4d_t p4d)
{
	return native_pgd_val(p4d.pgd);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;

static inline pud_t native_make_pud(pmdval_t val)
{
	return (pud_t) { val };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return pud.pud;
}
#else
#include <asm-generic/pgtable-nopud.h>

static inline pud_t native_make_pud(pudval_t val)
{
	return (pud_t) { .p4d.pgd = native_make_pgd(val) };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return native_pgd_val(pud.p4d.pgd);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { val };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}
#else
#include <asm-generic/pgtable-nopmd.h>

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return native_pgd_val(pmd.pud.p4d.pgd);
}
#endif

static inline p4dval_t p4d_pfn_mask(p4d_t p4d)
{
	/* No 512 GiB huge pages yet */
	return PTE_PFN_MASK;
}

static inline p4dval_t p4d_flags_mask(p4d_t p4d)
{
	return ~p4d_pfn_mask(p4d);
}

static inline p4dval_t p4d_flags(p4d_t p4d)
{
	return native_p4d_val(p4d) & p4d_flags_mask(p4d);
}

static inline pudval_t pud_pfn_mask(pud_t pud)
{
	if (native_pud_val(pud) & _PAGE_PSE)
		return PHYSICAL_PUD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
}

static inline pudval_t pud_flags_mask(pud_t pud)
{
	return ~pud_pfn_mask(pud);
}

static inline pudval_t pud_flags(pud_t pud)
{
	return native_pud_val(pud) & pud_flags_mask(pud);
}

static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
{
	if (native_pmd_val(pmd) & _PAGE_PSE)
		return PHYSICAL_PMD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
}

static inline pmdval_t pmd_flags_mask(pmd_t pmd)
{
	return ~pmd_pfn_mask(pmd);
}

static inline pmdval_t pmd_flags(pmd_t pmd)
{
	return native_pmd_val(pmd) & pmd_flags_mask(pmd);
}

static inline pte_t native_make_pte(pteval_t val)
{
	return (pte_t) { .pte = val };
}

static inline pteval_t native_pte_val(pte_t pte)
{
	return pte.pte;
}

static inline pteval_t pte_flags(pte_t pte)
{
	return native_pte_val(pte) & PTE_FLAGS_MASK;
}

#define __pte2cm_idx(cb) \
	((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) | \
	 (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) | \
	 (((cb) >> _PAGE_BIT_PWT) & 1))
#define __cm_idx2pte(i) \
	((((i) & 4) << (_PAGE_BIT_PAT - 2)) | \
	 (((i) & 2) << (_PAGE_BIT_PCD - 1)) | \
	 (((i) & 1) << _PAGE_BIT_PWT))

unsigned long cachemode2protval(enum page_cache_mode pcm);
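/*
 * Worked example (hedged): with _PAGE_BIT_PWT=3, _PAGE_BIT_PCD=4 and
 * _PAGE_BIT_PAT=7, __cm_idx2pte() spreads a 3-bit PAT index over the
 * PWT/PCD/PAT pte bits, and __pte2cm_idx() inverts that mapping:
 *
 *	__cm_idx2pte(3) == _PAGE_PCD | _PAGE_PWT	// 0x18
 *	__pte2cm_idx(_PAGE_PCD | _PAGE_PWT) == 3
 *
 * Index 3 is UC in the power-on default PAT layout; cachemode2protval()
 * additionally consults the runtime PAT configuration rather than relying
 * on that default.
 */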

static inline pgprotval_t protval_4k_2_large(pgprotval_t val)
{
	return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
}
static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
{
	return __pgprot(protval_4k_2_large(pgprot_val(pgprot)));
}
static inline pgprotval_t protval_large_2_4k(pgprotval_t val)
{
	return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT_LARGE) >>
		 (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
}
static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
{
	return __pgprot(protval_large_2_4k(pgprot_val(pgprot)));
}
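/*
 * These helpers exist because the PAT bit lives at bit 7 in 4KB ptes but
 * at bit 12 in 2MB/1GB entries, where bit 7 is _PAGE_PSE. A sketch of the
 * round trip:
 *
 *	pgprotval_t v4k = _PAGE_PRESENT | _PAGE_PAT;	// PAT at bit 7
 *	pgprotval_t vlg = protval_4k_2_large(v4k);	// PAT now at bit 12
 *	// protval_large_2_4k(vlg) == v4k
 */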


typedef struct page *pgtable_t;

extern pteval_t __supported_pte_mask;
extern pteval_t __default_kernel_pte_mask;
extern void set_nx(void);
extern int nx_enabled;

#define pgprot_writecombine pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough pgprot_writethrough
extern pgprot_t pgprot_writethrough(pgprot_t prot);

/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_init(void);
#else
#define native_pagetable_init paging_init
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_512G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
				    unsigned int *level);
extern pmd_t *lookup_pmd_address(unsigned long address);
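/*
 * Usage sketch (hedged, error handling elided): looking up the entry that
 * maps a kernel virtual address and checking which level maps it:
 *
 *	unsigned int level;
 *	pte_t *ptep = lookup_address(addr, &level);
 *
 *	if (ptep && level == PG_LEVEL_2M)
 *		;	// 'ptep' really points at a PSE pmd, per the
 *			// comment above
 */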
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
					  unsigned long address,
					  unsigned numpages,
					  unsigned long page_flags);
extern int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
					    unsigned long numpages);
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_DEFS_H */