/*
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <asm/mmu_context.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>

extern void except_vec1_sb1(void);

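/*
 * Build, for a given TLB index, an EntryHi value that can never match a
 * real lookup: each slot gets its own KSEG0-based VPN2, spaced one
 * even/odd page pair apart, so invalidated entries never duplicate one
 * another.
 */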
#define UNIQUE_ENTRYHI(idx) (KSEG0 + ((idx) << (PAGE_SHIFT + 1)))

/* Dump the current entry* and pagemask registers */
static inline void dump_cur_tlb_regs(void)
{
	unsigned int entryhihi, entryhilo, entrylo0hi, entrylo0lo, entrylo1hi;
	unsigned int entrylo1lo, pagemask;

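	/*
	 * tlbr loads the entry selected by c0_index into EntryHi (CP0 $10),
	 * EntryLo0 ($2) and EntryLo1 ($3).  Each 64-bit register is split
	 * into 32-bit halves with dsrl32/sll so the values can be printed
	 * with 32-bit formats; PageMask ($5) fits in 32 bits.
	 */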
	__asm__ __volatile__ (
		".set push             \n"
		".set noreorder        \n"
		".set mips64           \n"
		".set noat             \n"
		"     tlbr             \n"
		"     dmfc0  $1, $10   \n"
		"     dsrl32 %0, $1, 0 \n"
		"     sll    %1, $1, 0 \n"
		"     dmfc0  $1, $2    \n"
		"     dsrl32 %2, $1, 0 \n"
		"     sll    %3, $1, 0 \n"
		"     dmfc0  $1, $3    \n"
		"     dsrl32 %4, $1, 0 \n"
		"     sll    %5, $1, 0 \n"
		"     mfc0   %6, $5    \n"
		".set pop              \n"
		: "=r" (entryhihi), "=r" (entryhilo),
		  "=r" (entrylo0hi), "=r" (entrylo0lo),
		  "=r" (entrylo1hi), "=r" (entrylo1lo),
		  "=r" (pagemask));

	printk("%08X%08X %08X%08X %08X%08X %08X",
	       entryhihi, entryhilo,
	       entrylo0hi, entrylo0lo,
	       entrylo1hi, entrylo1lo,
	       pagemask);
}

void sb1_dump_tlb(void)
{
	unsigned long old_ctx;
	unsigned long flags;
	int entry;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	printk("Current TLB registers state:\n"
	       "      EntryHi       EntryLo0          EntryLo1     PageMask  Index\n"
	       "--------------------------------------------------------------------\n");
	dump_cur_tlb_regs();
	printk(" %08X\n", read_c0_index());
	printk("\n\nFull TLB Dump:\n"
	       "Idx      EntryHi       EntryLo0          EntryLo1     PageMask\n"
	       "--------------------------------------------------------------\n");
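	/* dump_cur_tlb_regs() does the tlbr, so walking c0_index across
	   the TLB is enough to read back every slot. */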
	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry);
		printk("\n%02i ", entry);
		dump_cur_tlb_regs();
	}
	printk("\n");
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi() & ASID_MASK;
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();
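	/* Rewrite every non-wired slot with a unique, unmapped VPN2;
	   entries below the wired index are deliberately left alone. */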
	while (entry < current_cpu_data.tlbsize) {
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		tlb_write_indexed();
		entry++;
	}
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}


/*
 * Use a bogus region of memory (starting at 0) to sanitize the TLBs.
 * Use increments of the maximum page size (16MB), and check for duplicate
 * entries before doing a given write.  Then, when we're safe from collisions
 * with the firmware, go back and give all the entries invalid addresses with
 * the normal flush routine.  Wired entries will be killed as well!
 */
static void __init sb1_sanitize_tlb(void)
{
	int entry;
	long addr = 0;
	long inc = 1 << 24;	/* 16MB */

	/* Zero the EntryLo pair so every entry we write is invalid */
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
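		/* Bump the address until tlb_probe() misses; CP0 Index goes
		   negative (probe-failure bit set) once no entry matches. */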
		do {
			addr += inc;
			write_c0_entryhi(addr);
			tlb_probe();
		} while ((int)(read_c0_index()) >= 0);
		write_c0_index(entry);
		tlb_write_indexed();
	}
	/* Now that we know we're safe from collisions, we can safely flush
	   the TLB with the "normal" routine. */
	local_flush_tlb_all();
}


void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                      unsigned long end)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);
	cpu = smp_processor_id();
	if (cpu_context(cpu, mm) != 0) {
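		/* Each TLB entry maps an even/odd page pair, so the entry
		   count is half the page count; past half the TLB it is
		   cheaper to bump the ASID via drop_mmu_context(). */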
		int size;
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= (current_cpu_data.tlbsize/2)) {
			int oldpid = read_c0_entryhi() & ASID_MASK;
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				tlb_probe();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
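				/* On a probe miss idx is negative and the
				   bogus EntryHi written above is simply
				   never committed by an indexed write. */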
				if (idx < 0)
					continue;
				tlb_write_indexed();
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
	}
	local_irq_restore(flags);
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long flags;
	int cpu = smp_processor_id();

	local_irq_save(flags);
	if (cpu_context(cpu, vma->vm_mm) != 0) {
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		oldpid = read_c0_entryhi() & ASID_MASK;
		write_c0_entryhi(page | newpid);
		tlb_probe();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		tlb_write_indexed();
	finish:
		write_c0_entryhi(oldpid);
	}
	local_irq_restore(flags);
}

/*
 * Remove one kernel space TLB entry.  This entry is assumed to be marked
 * global so we don't do the ASID thing.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	page &= (PAGE_MASK << 1);

	local_irq_save(flags);
	oldpid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(page);
	tlb_probe();
	idx = read_c0_index();
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		tlb_write_indexed();
	}

	write_c0_entryhi(oldpid);
	local_irq_restore(flags);
}

/*
 * All entries common to a mm share an asid.  To effectively flush
 * these entries, we just bump the asid.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}
}

/* Stolen from mips32 routines */

void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;
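
	/*
	 * The CP0 accesses below are interleaved with the page table walk
	 * to hide their latency.  Shifting each PTE down by 6 strips the
	 * software-only flag bits and leaves the hardware EntryLo layout
	 * (G/V/D, cache attributes, PFN).
	 */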
	local_irq_save(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	tlb_probe();
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset(pmdp, address);
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	if (idx < 0) {
		tlb_write_random();
	} else {
		tlb_write_indexed();
	}
	local_irq_restore(flags);
}

void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

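	/* Entries below c0_wired are never victims of tlbwr and survive
	   local_flush_tlb_all(); claim the next slot and raise the limit. */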
	local_irq_save(flags);
	old_ctx = read_c0_entryhi() & ASID_MASK;
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);

	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	tlb_write_indexed();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);

	local_flush_tlb_all();
	local_irq_restore(flags);
}

/*
 * This is called from loadmmu.c.  We have to set up all the
 * memory management function pointers, as well as initialize
 * the caches and TLBs.
 */
void sb1_tlb_init(void)
{
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);

	/*
	 * We don't know what state the firmware left the TLBs in, so this is
	 * the ultra-conservative way to flush the TLBs and avoid machine
	 * check exceptions due to duplicate TLB entries.
	 */
	sb1_sanitize_tlb();
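
	/* Install the SB1-specific handler at the XTLB refill vector
	   (offset 0x080) and make the copied code visible to the
	   instruction stream. */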
	memcpy((void *)KSEG0 + 0x080, except_vec1_sb1, 0x80);
	flush_icache_range(KSEG0 + 0x080, KSEG0 + 0x100);
}