/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct therefore is
 * a two step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */
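
/*
 * Illustrative sketch only, not part of this header: the two step
 * update described above, with a hypothetical __invalidate_pte()
 * helper standing in for the real IPTE-issuing primitive (see the
 * ptep_xchg_direct() machinery in the s390 mm code).
 */
#if 0	/* example only, never compiled */
static inline void example_update_pte(struct mm_struct *mm,
				      unsigned long addr,
				      pte_t *ptep, pte_t new_pte)
{
	/* Step i: invalidate the attached entry; IPTE also purges the TLB. */
	__invalidate_pte(mm, addr, ptep);	/* hypothetical helper */
	/* Step ii: only now may the new value be stored. */
	set_pte(ptep, new_pte);
}
#endif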

void __tlb_remove_table(void *_table);
static inline void tlb_flush(struct mmu_gather *tlb);
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct encoded_page *page,
					  int page_size);

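/*
 * The overrides must be defined before asm-generic/tlb.h is included;
 * the generic header provides default versions under #ifndef, so these
 * defines make it pick up the s390 implementations below instead.
 */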
#define tlb_flush tlb_flush
#define pte_free_tlb pte_free_tlb
#define pmd_free_tlb pmd_free_tlb
#define p4d_free_tlb p4d_free_tlb
#define pud_free_tlb pud_free_tlb

#include <asm/tlbflush.h>
#include <asm-generic/tlb.h>

/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
 * has already been freed, so just do free_page_and_swap_cache.
 *
 * s390 doesn't delay rmap removal, so there is nothing encoded in
 * the page pointer.
 */
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct encoded_page *page,
					  int page_size)
{
	free_page_and_swap_cache(encoded_page_ptr(page));
	return false;
}

static inline void tlb_flush(struct mmu_gather *tlb)
{
	__tlb_flush_mm_lazy(tlb->mm);
}
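
/*
 * Note on the lazy flush: __tlb_flush_mm_lazy() (asm/tlbflush.h)
 * implements the optimization mentioned at the top of this file. It
 * only performs a machine wide flush if mm->context.flush_mm is set,
 * e.g. by one of the *_free_tlb() helpers below, and resets the flag
 * once the flush has been done.
 */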

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_pmds = 1;
	/*
	 * page_table_free_rcu takes care of the allocation bit masks
	 * of the 2K table fragments in the 4K page table page,
	 * then calls tlb_remove_table.
	 */
	page_table_free_rcu(tlb, (unsigned long *) pte, address);
}
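
/*
 * Rough usage sketch, assumed caller pattern rather than code from
 * this file: the generic mmu_gather code is what invokes the hooks
 * defined here when a mapping is torn down.
 */
#if 0	/* example only, never compiled */
static void example_teardown(struct mm_struct *mm)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);
	/*
	 * unmap_vmas()/free_pgtables() call __tlb_remove_page_size()
	 * for the mapped pages and pte_free_tlb(), pmd_free_tlb(), ...
	 * for the page table levels that become empty.
	 */
	tlb_finish_mmu(&tlb);	/* ends up in tlb_flush() above */
}
#endif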

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. mm_pmd_folded() checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
	if (mm_pmd_folded(tlb->mm))
		return;
	pagetable_pmd_dtor(virt_to_ptdesc(pmd));
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_puds = 1;
	tlb_remove_ptdesc(tlb, pmd);
}

/*
 * p4d_free_tlb frees a p4d table and clears the CRSTE for the
 * region second table entry from the tlb.
 * If the mm uses a four level page table the single p4d is freed
 * as the pgd. mm_p4d_folded() checks the asce_limit against 8PB
 * to avoid the double free of the p4d in this case.
 */
static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long address)
{
	if (mm_p4d_folded(tlb->mm))
		return;
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb_remove_table(tlb, p4d);
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. mm_pud_folded() checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
	if (mm_pud_folded(tlb->mm))
		return;
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_p4ds = 1;
	tlb_remove_table(tlb, pud);
}
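
/*
 * Summary of the folding checks above, derived from the comments in
 * this file: which higher level tables exist depends on the address
 * space limit of the mm.
 *
 *	asce_limit	levels	folded into the pgd
 *	<= 2GB		2	pmd, pud and p4d
 *	<= 4TB		3	pud and p4d
 *	<= 8PB		4	p4d
 *	larger		5	none
 */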

#endif /* _S390_TLB_H */