#ifndef __ASM_SH64_MMU_CONTEXT_H
#define __ASM_SH64_MMU_CONTEXT_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/mmu_context.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003  Richard Curnow
 *
 * ASID handling idea taken from MIPS implementation.
 *
 */

#ifndef __ASSEMBLY__

/*
 * Cache of MMU context last used.
 *
 * The MMU "context" consists of two things:
 *    (a) TLB cache version (or cycle, top 24 bits of mmu_context_cache)
 *    (b) ASID (Address Space IDentifier, bottom 8 bits of mmu_context_cache)
 */
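/*
 * Worked example (illustrative, not from the original source): a context
 * value of 0x00000304 encodes version 3 (0x300) and ASID 0x04; only the
 * low 8 ASID bits are ever programmed into the hardware via set_asid().
 */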
extern unsigned long mmu_context_cache;

#include <linux/config.h>
#include <asm/page.h>

/* Current mm's pgd */
extern pgd_t *mmu_pdtp_cache;

#define SR_ASID_MASK			0xffffffffff00ffffULL
#define SR_ASID_SHIFT			16

#define MMU_CONTEXT_ASID_MASK		0x000000ff
#define MMU_CONTEXT_VERSION_MASK	0xffffff00
#define MMU_CONTEXT_FIRST_VERSION	0x00000100
#define NO_CONTEXT			0

/* The ASID is an 8-bit value, so it can never be 0x100 */
#define MMU_NO_ASID			0x100

/*
 * Virtual Page Number mask
 */
#define MMU_VPN_MASK			0xfffff000
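/*
 * Illustrative note (added): with 4 kB pages the low 12 bits of an
 * address are the page offset, so (addr & MMU_VPN_MASK) yields the
 * virtual page number used for TLB matching.
 */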

extern __inline__ void
get_new_mmu_context(struct mm_struct *mm)
{
	extern void flush_tlb_all(void);
	extern void flush_cache_all(void);

	unsigned long mc = ++mmu_context_cache;

	if (!(mc & MMU_CONTEXT_ASID_MASK)) {
		/* We have exhausted the ASIDs of this version.
		   Flush the whole TLB and start a new cycle. */
		flush_tlb_all();
		/* We also have to flush all the caches, as ASIDs are
		   used in the caches. */
		flush_cache_all();
		/* Fix the version if needed.
		   Note that we avoid version #0/ASID #0 to distinguish NO_CONTEXT. */
		if (!mc)
			mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
	}
	mm->context = mc;
}
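
/*
 * Worked example (illustrative): if mmu_context_cache held 0x2ff, the
 * pre-increment above yields 0x300; the low 8 bits are zero, meaning
 * every ASID of version 2 has been handed out, so the TLB and caches
 * are flushed and the mm receives context 0x300 (version 3, ASID 0).
 * Only if the counter wraps all the way to zero is it reset to
 * MMU_CONTEXT_FIRST_VERSION, which keeps the all-zero NO_CONTEXT value
 * unused.
 */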

/*
 * Get an MMU context if needed.
 */
static __inline__ void
get_mmu_context(struct mm_struct *mm)
{
	if (mm) {
		unsigned long mc = mmu_context_cache;
		/* Check whether this mm holds a context from an old version.
		   If so, it needs a new context with the current version. */
		if ((mm->context ^ mc) & MMU_CONTEXT_VERSION_MASK)
			get_new_mmu_context(mm);
	}
}
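
/*
 * Note (added explanation): comparing only the version bits is enough,
 * because a context minted under an older version may have had its ASID
 * reissued to another mm since the last TLB flush; a matching version
 * guarantees the ASID is still exclusively ours.
 */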

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static __inline__ int init_new_context(struct task_struct *tsk,
				       struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	return 0;
}

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static __inline__ void destroy_context(struct mm_struct *mm)
{
	extern void flush_tlb_mm(struct mm_struct *mm);

	/* Well, at least free the TLB entries */
	flush_tlb_mm(mm);
}

#endif	/* __ASSEMBLY__ */

/* Common defines */
#define TLB_STEP	0x00000010
#define TLB_PTEH	0x00000000
#define TLB_PTEL	0x00000008

/* PTEH defines */
#define PTEH_ASID_SHIFT	2
#define PTEH_VALID	0x0000000000000001
#define PTEH_SHARED	0x0000000000000002
#define PTEH_MATCH_ASID	0x00000000000003ff
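/*
 * Layout note (inferred from the defines above, not authoritative):
 * PTEH bit 0 is the valid bit, bit 1 the shared bit, and the ASID
 * occupies bits 2..9, which is why PTEH_MATCH_ASID is 0x3ff.
 */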

#ifndef __ASSEMBLY__
/* This has to be a common function because the next location to
 * fill information into is shared. */
extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte);

/* Profiling counter. */
#ifdef CONFIG_SH64_PROC_TLB
extern unsigned long long calls_to_do_fast_page_fault;
#endif

static inline unsigned long get_asid(void)
{
	unsigned long long sr;

	asm volatile ("getcon " __c0 ", %0\n\t"
		      : "=r" (sr));

	sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
	return (unsigned long) sr;
}

/* Set the ASID in SR */
static inline void set_asid(unsigned long asid)
{
	unsigned long long sr, pc;

	asm volatile ("getcon " __c0 ", %0" : "=r" (sr));

	sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);

	/*
	 * This function may be inlined at several call sites, so to avoid
	 * the assembler reporting duplicate symbols we use the gas trick
	 * of generating local labels numerically and referencing them
	 * forward.
	 */
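	/*
	 * Added explanation (our reading of the sequence, assuming the
	 * usual SH-5 mapping of __c0/__c1/__c8 to the SR/SSR/SPC control
	 * registers): the new SR is first installed with bit 28 set
	 * (presumably SR.BL, blocking further exceptions), the same SR
	 * without that bit is parked in SSR, SPC is pointed at local
	 * label 1 with its low bit set so execution stays in SHmedia
	 * mode, and the RTE then atomically restores SR from SSR and
	 * jumps to the label, completing the ASID switch.
	 */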
	asm volatile ("movi	1, %1\n\t"
		      "shlli	%1, 28, %1\n\t"
		      "or	%0, %1, %1\n\t"
		      "putcon	%1, " __c0 "\n\t"
		      "putcon	%0, " __c1 "\n\t"
		      "_loada	1f, %1\n\t"
		      "ori	%1, 1, %1\n\t"
		      "putcon	%1, " __c8 "\n\t"
		      "rte\n"
		      "1:\n\t"
		      : "=r" (sr), "=r" (pc) : "0" (sr));
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static __inline__ void activate_context(struct mm_struct *mm)
{
	get_mmu_context(mm);
	set_asid(mm->context & MMU_CONTEXT_ASID_MASK);
}

static __inline__ void switch_mm(struct mm_struct *prev,
				 struct mm_struct *next,
				 struct task_struct *tsk, unsigned int cpu)
{
	set_bit(cpu, &next->cpu_vm_mask);

	if (prev != next) {
		mmu_pdtp_cache = next->pgd;
		activate_context(next);
		clear_bit(cpu, &prev->cpu_vm_mask);
	}
}

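/*
 * Added note: activate_mm() runs when a brand-new address space is
 * adopted (e.g. on exec); here it is simply a full switch_mm() on the
 * current CPU with no task pointer.
 */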
#define activate_mm(prev, next) \
	switch_mm((prev), (next), NULL, smp_processor_id())

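/*
 * Added note: nothing needs doing when a kernel thread borrows this mm
 * for lazy TLB use, so this hook is an intentional no-op here.
 */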
static __inline__ void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_SH64_MMU_CONTEXT_H */