/*
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/mmu_context.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>

extern char except_vec0_sb1[];

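/*
 * Generate a distinct, never-matching EntryHi value for TLB slot "idx":
 * KSEG0 is an unmapped region, so a VPN2 inside it can never hit on a
 * translation, and spacing the values one page pair apart keeps the
 * invalidated entries from duplicating each other (duplicate entries
 * machine check on the SB-1).
 */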
#define UNIQUE_ENTRYHI(idx) (KSEG0 + ((idx) << (PAGE_SHIFT + 1)))

/* Dump the current EntryHi, EntryLo0/1 and PageMask registers */
static inline void dump_cur_tlb_regs(void)
{
	unsigned int entryhihi, entryhilo, entrylo0hi, entrylo0lo, entrylo1hi;
	unsigned int entrylo1lo, pagemask;

	__asm__ __volatile__ (
		".set push             \n"
		".set noreorder        \n"
		".set mips64           \n"
		".set noat             \n"
		"     tlbr             \n"	/* read entry at Index into c0 regs */
		"     dmfc0  $1, $10   \n"	/* EntryHi */
		"     dsrl32 %0, $1, 0 \n"	/* upper 32 bits */
		"     sll    %1, $1, 0 \n"	/* lower 32 bits */
		"     dmfc0  $1, $2    \n"	/* EntryLo0 */
		"     dsrl32 %2, $1, 0 \n"
		"     sll    %3, $1, 0 \n"
		"     dmfc0  $1, $3    \n"	/* EntryLo1 */
		"     dsrl32 %4, $1, 0 \n"
		"     sll    %5, $1, 0 \n"
		"     mfc0   %6, $5    \n"	/* PageMask (32 bits) */
		".set pop              \n"
		: "=r" (entryhihi), "=r" (entryhilo),
		  "=r" (entrylo0hi), "=r" (entrylo0lo),
		  "=r" (entrylo1hi), "=r" (entrylo1lo),
		  "=r" (pagemask));

	printk("%08X%08X %08X%08X %08X%08X %08X",
	       entryhihi, entryhilo,
	       entrylo0hi, entrylo0lo,
	       entrylo1hi, entrylo1lo,
	       pagemask);
}

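/*
 * Dump the current TLB register state followed by every TLB entry, for
 * debugging.  Interrupts are disabled while the dump runs; EntryHi is
 * restored afterwards, but Index and the entry registers are clobbered.
 */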
void sb1_dump_tlb(void)
{
	unsigned long old_ctx;
	unsigned long flags;
	int entry;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	printk("Current TLB register state:\n"
	       "      EntryHi       EntryLo0          EntryLo1     PageMask  Index\n"
	       "--------------------------------------------------------------------\n");
	dump_cur_tlb_regs();
	printk(" %08X\n", read_c0_index());
	printk("\n\nFull TLB Dump:\n"
	       "Idx      EntryHi       EntryLo0          EntryLo1     PageMask\n"
	       "--------------------------------------------------------------\n");
	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry);
		printk("\n%02i ", entry);
		dump_cur_tlb_regs();
	}
	printk("\n");
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}

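/*
 * Invalidate the whole TLB except for wired entries: every slot from
 * c0_wired upward is loaded with a unique unmapped VPN2 and zeroed
 * (hence invalid) EntryLo values.
 */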
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi() & ASID_MASK;
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Start past the wired entries; they must survive the flush */
	entry = read_c0_wired();
	while (entry < current_cpu_data.tlbsize) {
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		tlb_write_indexed();
		entry++;
	}
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}

/*
 * Use a bogus region of memory (starting at 0) to sanitize the TLBs.
 * Use increments of the maximum page size (16MB), and check for duplicate
 * entries before doing a given write.  Then, when we're safe from collisions
 * with the firmware, go back and give all the entries invalid addresses with
 * the normal flush routine.  Wired entries will be killed as well!
 */
static void __init sb1_sanitize_tlb(void)
{
	int entry;
	long addr = 0;
	long inc = 1 << 24;  /* 16MB */

	/* Invalid (zero) EntryLo values for every entry we write */
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		/* Advance until tlb_probe() misses (Index goes negative),
		   so the write below cannot duplicate an existing entry. */
		do {
			addr += inc;
			write_c0_entryhi(addr);
			tlb_probe();
		} while ((int)(read_c0_index()) >= 0);
		write_c0_index(entry);
		tlb_write_indexed();
	}
	/* Now that we know we're safe from collisions, we can safely flush
	   the TLB with the "normal" routine. */
	local_flush_tlb_all();
}

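/*
 * Flush a range of user addresses for one mm.  If the range spans more
 * than half the TLB it is cheaper to retire the whole ASID via
 * drop_mmu_context() than to probe for each page pair individually.
 */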
void local_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                      unsigned long end)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);
	cpu = smp_processor_id();
	if (cpu_context(cpu, mm) != 0) {
		int size;
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		/* Each TLB entry maps an even/odd page pair */
		size = (size + 1) >> 1;
		if (size <= (current_cpu_data.tlbsize/2)) {
			int oldpid = read_c0_entryhi() & ASID_MASK;
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				tlb_probe();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				if (idx < 0)	/* no matching entry */
					continue;
				tlb_write_indexed();
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
	}
	local_irq_restore(flags);
}

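/*
 * Flush a single user page: probe for the page pair containing "page"
 * under the mm's ASID and, if it is present, invalidate that entry.
 */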
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long flags;
	int cpu = smp_processor_id();

	local_irq_save(flags);
	if (cpu_context(cpu, vma->vm_mm) != 0) {
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		oldpid = read_c0_entryhi() & ASID_MASK;
		write_c0_entryhi(page | newpid);
		tlb_probe();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		tlb_write_indexed();
	finish:
		write_c0_entryhi(oldpid);
	}
	local_irq_restore(flags);
}

/*
 * Remove one kernel space TLB entry.  This entry is assumed to be marked
 * global so we don't do the ASID thing.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	page &= (PAGE_MASK << 1);
	oldpid = read_c0_entryhi() & ASID_MASK;

	local_irq_save(flags);
	write_c0_entryhi(page);
	tlb_probe();
	idx = read_c0_index();
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		tlb_write_indexed();
	}

	write_c0_entryhi(oldpid);
	local_irq_restore(flags);
}

/* The highmem code wants this. */
EXPORT_SYMBOL(local_flush_tlb_one);

/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}
}

/* Stolen from mips32 routines */

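/*
 * Refill the TLB entry covering a freshly updated PTE.  The page table
 * walk is interleaved with the probe, presumably to hide the tlb_probe()
 * hazard; the two EntryLo values come from the even/odd PTE pair.
 */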
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	tlb_probe();
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset(pmdp, address);
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	if (idx < 0) {
		tlb_write_random();
	} else {
		tlb_write_indexed();
	}
	local_irq_restore(flags);
}

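/*
 * Install a permanent translation in the next wired slot.  The final
 * local_flush_tlb_all() clears out any stale non-wired entries while
 * leaving the newly wired one (and everything below c0_wired) intact.
 */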
void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi() & 0xff;	/* ASID field */
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);

	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	tlb_write_indexed();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);

	local_flush_tlb_all();
	local_irq_restore(flags);
}

/*
 * This is called from loadmmu.c to initialize the TLB: set the default
 * page mask, sanitize whatever state the firmware left behind, and
 * install the SB-1 TLB refill handler.
 */
void sb1_tlb_init(void)
{
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);

	/*
	 * We don't know what state the firmware left the TLBs in, so this is
	 * the ultra-conservative way to flush the TLBs and avoid machine
	 * check exceptions due to duplicate TLB entries
	 */
	sb1_sanitize_tlb();

	/* Install the 0x80-byte TLB refill handler at the KSEG0 vector */
	memcpy((void *)KSEG0, except_vec0_sb1, 0x80);
	flush_icache_range(KSEG0, KSEG0 + 0x80);
}