/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <linux/mem_encrypt.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))
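
/*
 * Illustrative use (a common driver pattern, not part of this header):
 * mark a mapping uncached before handing device memory to userspace:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 */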

#ifndef __ASSEMBLY__
#include <linux/spinlock.h>
#include <asm/x86_init.h>
#include <asm/pkru.h>
#include <asm/fpu/api.h>
#include <asm/coco.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/page_table_check.h>

extern pgd_t early_top_pgt[PTRS_PER_PGD];
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd);

void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm);
void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
				   bool user);
void ptdump_walk_pgd_level_checkwx(void);
void ptdump_walk_user_pgd_level_checkwx(void);

/*
 * Macros to add or remove encryption attribute
 */
#define pgprot_encrypted(prot)	__pgprot(cc_mkenc(pgprot_val(prot)))
#define pgprot_decrypted(prot)	__pgprot(cc_mkdec(pgprot_val(prot)))

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()		ptdump_walk_pgd_level_checkwx()
#define debug_checkwx_user()	ptdump_walk_user_pgd_level_checkwx()
#else
#define debug_checkwx()		do { } while (0)
#define debug_checkwx_user()	do { } while (0)
#endif
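
/*
 * Usage sketch (an assumption about the callers, which live outside this
 * header): debug_checkwx() is expected to run once late in boot, e.g. from
 * mark_rodata_ro(), to warn about pages that are both writable and
 * executable.
 */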

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

extern pmdval_t early_pmd_flags;

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT_XXL */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			(pgtable_l5_enabled() ? native_pgd_clear(pgd) : 0)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)
#endif	/* CONFIG_PARAVIRT_XXL */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

/* Entries that were set to PROT_NONE are inverted */

static inline u64 protnone_mask(u64 val);
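
/*
 * Background note (summarizing the design, added for clarity): PROT_NONE
 * entries keep their PFN bits inverted so that a non-present PTE never
 * contains a valid-looking physical address. This is part of the L1TF
 * mitigation; protnone_mask() yields the mask to XOR when reading the PFN
 * back out, which is exactly what pte_pfn()/pmd_pfn()/pud_pfn() below do.
 */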

static inline unsigned long pte_pfn(pte_t pte)
{
	phys_addr_t pfn = pte_val(pte);
	pfn ^= protnone_mask(pfn);
	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	phys_addr_t pfn = pmd_val(pmd);
	pfn ^= protnone_mask(pfn);
	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	phys_addr_t pfn = pud_val(pud);
	pfn ^= protnone_mask(pfn);
	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define p4d_leaf	p4d_large
static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* NOTE: when predicating a huge page, also consider pmd_devmap(), or use pmd_large() */
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif	/* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}
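
/*
 * Illustrative sketch (added commentary, not new behaviour): every
 * pte_mk*()/pte_clr*() helper below is a thin wrapper around the two
 * primitives above, e.g.:
 *
 *	pte = pte_set_flags(pte, _PAGE_RW);	 equivalent to pte_mkwrite(pte)
 *	pte = pte_clear_flags(pte, _PAGE_RW);	 equivalent to pte_wrprotect(pte)
 */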

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pte_uffd_wp(pte_t pte)
{
	return pte_flags(pte) & _PAGE_UFFD_WP;
}

static inline pte_t pte_mkuffd_wp(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_UFFD_WP);
}

static inline pte_t pte_clear_uffd_wp(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return native_make_pmd(v & ~clear);
}

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline int pmd_uffd_wp(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_UFFD_WP;
}

static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_UFFD_WP);
}

static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return native_make_pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
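
/*
 * Worked example (assuming a CPU without NX support, so _PAGE_NX is absent
 * from __supported_pte_mask): a present pgprot carrying _PAGE_NX comes back
 * with that bit stripped, while a non-present pgprot is returned untouched,
 * so swap/migration encodings that reuse those bits survive intact.
 */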

static inline pgprotval_t check_pgprot(pgprot_t pgprot)
{
	pgprotval_t massaged_val = massage_pgprot(pgprot);

	/* mmdebug.h can not be included here because of dependencies */
#ifdef CONFIG_DEBUG_VM
	WARN_ONCE(pgprot_val(pgprot) != massaged_val,
		  "attempted to set unsupported pgprot: %016llx "
		  "bits: %016llx supported: %016llx\n",
		  (u64)pgprot_val(pgprot),
		  (u64)pgprot_val(pgprot) ^ massaged_val,
		  (u64)__supported_pte_mask);
#endif

	return massaged_val;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PTE_PFN_MASK;
	return __pte(pfn | check_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PMD_PAGE_MASK;
	return __pmd(pfn | check_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
	pfn ^= protnone_mask(pgprot_val(pgprot));
	pfn &= PHYSICAL_PUD_PAGE_MASK;
	return __pud(pfn | check_pgprot(pgprot));
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	return pfn_pmd(pmd_pfn(pmd),
		       __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte), oldval = val;

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
	return __pte(val);
}
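
/*
 * Worked sketch (illustrative, summarizing the masks involved): pte_modify()
 * keeps everything in _PAGE_CHG_MASK (the PFN plus sticky bits such as
 * accessed, dirty and soft-dirty) and takes the remaining protection bits
 * from newprot. So converting a writable, dirty page to PAGE_READONLY
 * preserves the PFN and dirty state but drops _PAGE_RW.
 */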

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd), oldval = val;

	val &= _HPAGE_CHG_MASK;
	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
	return __pmd(val);
}

/*
 * mprotect needs to preserve PAT and encryption bits when updating
 * vm_page_prot
 */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	return __pgprot(preservebits | addbits);
}
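
/*
 * Usage sketch (an assumption about the mprotect path, shown for context):
 *
 *	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
 *					  vm_get_page_prot(newflags));
 *
 * so PAT and memory-encryption bits already present in vm_page_prot are
 * carried over while the access bits are replaced.
 */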

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);

/*
 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
 * Populates the user PGD copy and returns the resulting PGD that must be
 * set in the kernel copy of the page tables.
 */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return pgd;
	return __pti_set_user_pgtbl(pgdp, pgd);
}
#else   /* CONFIG_PAGE_TABLE_ISOLATION */
static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	return pgd;
}
#endif  /* CONFIG_PAGE_TABLE_ISOLATION */
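
/*
 * Usage sketch (an assumption about the callers, shown for orientation):
 * the 64-bit PGD setters route writes through pti_set_user_pgtbl() so that
 * an update to the kernel page tables also populates the user copy when
 * PTI is enabled, roughly:
 *
 *	*pgdp = pti_set_user_pgtbl(pgdp, pgd);
 */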

#endif	/* __ASSEMBLY__ */


#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}
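
/*
 * Background note on _PAGE_KNL_ERRATUM_MASK (summarizing its purpose):
 * Intel Knights Landing has an erratum where it can spuriously set the
 * Accessed/Dirty bits, even on otherwise-cleared entries, so pte_none()
 * and friends ignore those bits rather than trusting a strict zero check.
 */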

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
	    atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}
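
/*
 * Rationale note (added commentary): a PROT_NONE pte with a TLB flush still
 * pending must be treated as accessible, because stale, still-present TLB
 * entries may exist on other CPUs until the flush completes; NUMA-balancing
 * protection changes depend on this.
 */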

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/linux/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/*
	 * Only check low word on 32-bit platforms, since it might be
	 * out of sync with upper half.
	 */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~(_PAGE_USER | _PAGE_ACCESSED)) !=
	       (_KERNPG_TABLE & ~_PAGE_ACCESSED);
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
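
/*
 * Worked example (4 KiB pages, PAGE_SHIFT == 12): pages_to_mb(npg) is
 * npg >> 8, so 256 pages == 1 MB and 262144 pages == 1024 MB.
 */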

#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (p4d_flags(p4d) & ~ignore_flags) != 0;
}
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 1;
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	unsigned long ignore_flags = _PAGE_USER;

	if (!pgtable_l5_enabled())
		return 0;

	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
		ignore_flags |= _PAGE_NX;

	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	if (!pgtable_l5_enabled())
		return 0;
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);
void __init poking_init(void);
unsigned long init_memory_mapping(unsigned long start,
				  unsigned long end, pgprot_t prot);

#ifdef CONFIG_X86_64
extern pgd_t trampoline_pgd_entry;
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	page_table_check_pte_set(mm, addr, ptep, pte);
	set_pte(ptep, pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(mm, addr, pmdp, pmd);
	set_pmd(pmdp, pmd);
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	page_table_check_pud_set(mm, addr, pudp, pud);
	native_set_pud(pudp, pud);
}

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	page_table_check_pte_clear(mm, addr, pte);
	return pte;
}
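
/*
 * Note (describing the native helper used above, as an aid to the reader):
 * native_ptep_get_and_clear() must fetch and clear the pte atomically on
 * SMP (an xchg) so that hardware Accessed/Dirty updates racing with the
 * clear are not lost; the *_local_* variants above skip that for teardown
 * paths where no other user can race.
 */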

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
		page_table_check_pte_clear(mm, addr, pte);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	pmd_t pmd = native_pmdp_get_and_clear(pmdp);

	page_table_check_pmd_clear(mm, addr, pmd);

	return pmd;
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp)
{
	pud_t pud = native_pudp_get_and_clear(pudp);

	page_table_check_pud_clear(mm, addr, pud);

	return pud;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return pud_flags(pud) & _PAGE_RW;
}

#ifndef pmdp_establish
#define pmdp_establish pmdp_establish
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
	if (IS_ENABLED(CONFIG_SMP)) {
		return xchg(pmdp, pmd);
	} else {
		pmd_t old = *pmdp;
		WRITE_ONCE(*pmdp, pmd);
		return old;
	}
}
#endif
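
/*
 * Design note (added commentary): pmdp_establish() uses xchg() on SMP so
 * the old pmd, including any Accessed/Dirty bits the hardware set
 * concurrently, is returned atomically; on UP a plain WRITE_ONCE()
 * suffices since no other CPU can race with the update.
 */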

#define __HAVE_ARCH_PMDP_INVALIDATE_AD
extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp);

/*
 * Page table pages are page-aligned.  The lower half of the top
 * level is used for userspace and the top half for the kernel.
 *
 * Returns true for parts of the PGD that map userspace and
 * false for the parts that map the kernel.
 */
static inline bool pgdp_maps_userspace(void *__ptr)
{
	unsigned long ptr = (unsigned long)__ptr;

	return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
}

#define pgd_leaf	pgd_large
static inline int pgd_large(pgd_t pgd) { return 0; }

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
 * (8k-aligned and 8k in size).  The kernel one is at the beginning 4k and
 * the user one is in the last 4k.  To switch between them, you
 * just need to flip the 12th bit in their addresses.
 */
#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT

/*
 * This generates better code than the inline assembly in
 * __set_bit().
 */
static inline void *ptr_set_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr |= BIT(bit);
	return (void *)__ptr;
}

static inline void *ptr_clear_bit(void *ptr, int bit)
{
	unsigned long __ptr = (unsigned long)ptr;

	__ptr &= ~BIT(bit);
	return (void *)__ptr;
}

static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
{
	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
{
	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
{
	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}

static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
{
	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
}
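
/*
 * Worked example (illustrative addresses): with a kernel PGD page at
 * 0x...ffff8000, the user copy lives at 0x...ffff9000; setting or clearing
 * bit 12 (PTI_PGTABLE_SWITCH_BIT == PAGE_SHIFT) converts pointers between
 * the two 4k halves of the 8k allocation.
 */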
#endif /* CONFIG_PAGE_TABLE_ISOLATION */

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;
	/* Clone the user space pgd as well */
	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
	       count * sizeof(pgd_t));
#endif
}

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}

static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}

static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
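
/*
 * Worked example (x86-64 with 4 KiB pages: PAGE_SHIFT == 12, PTE_SHIFT == 9):
 * page_level_shift() is 3 + 9 * level, so PG_LEVEL_4K (1) -> 12,
 * PG_LEVEL_2M (2) -> 21 and PG_LEVEL_1G (3) -> 30, giving page_level_size()
 * of 4 KiB, 2 MiB and 1 GiB respectively.
 */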

/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}

#ifdef _PAGE_SWP_EXCLUSIVE
#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_EXCLUSIVE);
}

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_EXCLUSIVE);
}
#endif /* _PAGE_SWP_EXCLUSIVE */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
}
#endif
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_UFFD_WP);
}

static inline int pte_swp_uffd_wp(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_UFFD_WP;
}

static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_UFFD_WP);
}

static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SWP_UFFD_WP);
}

static inline int pmd_swp_uffd_wp(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SWP_UFFD_WP;
}

static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SWP_UFFD_WP);
}
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}

#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);

static inline bool arch_has_pfn_modify_check(void)
{
	return boot_cpu_has_bug(X86_BUG_L1TF);
}

#define arch_has_hw_pte_young arch_has_hw_pte_young
static inline bool arch_has_hw_pte_young(void)
{
	return true;
}

#ifdef CONFIG_XEN_PV
#define arch_has_hw_nonleaf_pmd_young arch_has_hw_nonleaf_pmd_young
static inline bool arch_has_hw_nonleaf_pmd_young(void)
{
	return !cpu_feature_enabled(X86_FEATURE_XENPV);
}
#endif

#ifdef CONFIG_PAGE_TABLE_CHECK
static inline bool pte_user_accessible_page(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PRESENT) && (pte_val(pte) & _PAGE_USER);
}

static inline bool pmd_user_accessible_page(pmd_t pmd)
{
	return pmd_leaf(pmd) && (pmd_val(pmd) & _PAGE_PRESENT) && (pmd_val(pmd) & _PAGE_USER);
}

static inline bool pud_user_accessible_page(pud_t pud)
{
	return pud_leaf(pud) && (pud_val(pud) & _PAGE_PRESENT) && (pud_val(pud) & _PAGE_USER);
}
#endif

#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */