/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU substantially follows the
 * architecture specification.  This includes the 6xx, 7xx, 7xxx,
 * 8260, and POWER3 implementations but excludes the 8xx and 4xx.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>

#include <asm/prom.h>
#include <asm/mmu.h>
#include <asm/machdep.h>

#include "mmu_decl.h"
#include "mem_pieces.h"

PTE *Hash, *Hash_end;
unsigned long Hash_size, Hash_mask;
unsigned long _SDR1;

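/*
 * For orientation: Hash points at the hash table of HPTEs, Hash_end
 * just past its last entry, and _SDR1 holds the value to be loaded
 * into the SDR1 register -- table origin in the high-order bits and
 * table-size mask in the low-order bits, i.e. the architecture's
 * HTABORG/HTABMASK fields.
 */
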
union ubat {			/* BAT register values to be loaded */
	BAT	bat;
#ifdef CONFIG_PPC64BRIDGE
	u64	word[2];
#else
	u32	word[2];
#endif
} BATS[4][2];			/* 4 pairs of IBAT, DBAT */

struct batrange {		/* stores address ranges mapped by BATs */
	unsigned long start;
	unsigned long limit;
	unsigned long phys;
} bat_addrs[4];
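
/*
 * A worked example with illustrative values (not from any particular
 * board): if BAT 2 maps 128MB of RAM at KERNELBASE 0xc0000000, then
 * bat_addrs[2] = { .start = 0xc0000000, .limit = 0xc7ffffff,
 * .phys = 0 }.  Note that limit is the last byte covered (inclusive),
 * which is how setbat() below records it.
 */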

/*
 * Return PA for this VA if it is mapped by a BAT, or 0
 */
unsigned long v_mapped_by_bats(unsigned long va)
{
	int b;
	for (b = 0; b < 4; ++b)
		if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
			return bat_addrs[b].phys + (va - bat_addrs[b].start);
	return 0;
}

/*
 * Return VA for a given PA or 0 if not mapped
 */
unsigned long p_mapped_by_bats(unsigned long pa)
{
	int b;
	for (b = 0; b < 4; ++b)
		if (pa >= bat_addrs[b].phys
		    && pa < (bat_addrs[b].limit - bat_addrs[b].start)
			     + bat_addrs[b].phys)
			return bat_addrs[b].start + (pa - bat_addrs[b].phys);
	return 0;
}
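
/*
 * Illustrative use of the two helpers above, continuing the example
 * BAT 2 mapping from the comment near bat_addrs[]:
 *
 *	v_mapped_by_bats(0xc0100000);	/- returns 0x00100000 -/
 *	p_mapped_by_bats(0x00100000);	/- returns 0xc0100000 -/
 *
 * A return of 0 is ambiguous when address 0 itself is covered by a
 * BAT (as it is for physical address 0 in the example); callers must
 * tolerate that ambiguity.
 */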

void __init bat_mapin_ram(unsigned long bat2, unsigned long bat3)
{
	unsigned long tot, done;

	tot = total_lowmem;
	setbat(2, KERNELBASE, PPC_MEMSTART, bat2, _PAGE_KERNEL);
	done = (unsigned long)bat_addrs[2].limit - KERNELBASE + 1;
	if ((done < tot) && !bat_addrs[3].limit && bat3) {
		tot -= done;
		setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bat3,
		       _PAGE_KERNEL);
	}
}
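
/*
 * For example (illustrative sizes): with total_lowmem = 192MB,
 * bat2 = 128MB and bat3 = 64MB, BAT 2 covers the first 128MB of
 * lowmem, done = 128MB < tot, and BAT 3 is then set up to cover the
 * remaining 64MB starting at KERNELBASE + 128MB.
 */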

/*
 * Set up one of the I/D BAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
void __init setbat(int index, unsigned long virt, unsigned long phys,
		   unsigned int size, int flags)
{
	unsigned int bl;
	int wimgxpp;
	union ubat *bat = BATS[index];

	if (((flags & _PAGE_NO_CACHE) == 0) &&
	    (cur_cpu_spec[0]->cpu_features & CPU_FTR_NEED_COHERENT))
		flags |= _PAGE_COHERENT;

	bl = (size >> 17) - 1;
	if (PVR_VER(mfspr(PVR)) != 1) {
		/* 603, 604, etc. */
		/* Do DBAT first */
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT | _PAGE_GUARDED);
		wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
		bat[1].word[0] = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
		bat[1].word[1] = phys | wimgxpp;
#ifndef CONFIG_KGDB /* want user access for breakpoints */
		if (flags & _PAGE_USER)
#endif
			bat[1].bat.batu.vp = 1;
		if (flags & _PAGE_GUARDED) {
			/* G bit must be zero in IBATs */
			bat[0].word[0] = bat[0].word[1] = 0;
		} else {
			/* make IBAT same as DBAT */
			bat[0] = bat[1];
		}
	} else {
		/* 601 cpu */
		if (bl > BL_8M)
			bl = BL_8M;
		wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
				   | _PAGE_COHERENT);
		wimgxpp |= (flags & _PAGE_RW)?
			((flags & _PAGE_USER)? PP_RWRW: PP_RWXX): PP_RXRX;
		bat->word[0] = virt | wimgxpp | 4;	/* Ks=0, Ku=1 */
		bat->word[1] = phys | bl | 0x40;	/* V=1 */
	}

	bat_addrs[index].start = virt;
	bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
	bat_addrs[index].phys = phys;
}
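
/*
 * A worked example of the BL encoding above: block lengths are in
 * 128kB (1 << 17) units, so for size = 256MB, bl = (256MB >> 17) - 1
 * = 2047 (0x7ff), and for size = 8MB, bl = 63.  The 601 cannot map
 * more than 8MB per BAT, hence the clamp to BL_8M in the 601 branch.
 */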

/*
 * Initialize the hash table and patch the instructions in hashtable.S.
 */
void __init MMU_init_hw(void)
{
	unsigned int hmask, mb, mb2;
	unsigned int n_hpteg, lg_n_hpteg;

	extern unsigned int hash_page_patch_A[];
	extern unsigned int hash_page_patch_B[], hash_page_patch_C[];
	extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];

	if ((cur_cpu_spec[0]->cpu_features & CPU_FTR_HPTE_TABLE) == 0) {
		Hash_size = 0;
		Hash_end = 0;
		Hash = 0;
		return;
	}

	if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);

#ifdef CONFIG_PPC64BRIDGE
#define LG_HPTEG_SIZE	7		/* 128 bytes per HPTEG */
#define SDR1_LOW_BITS	(lg_n_hpteg - 11)
#define MIN_N_HPTEG	2048		/* min 256kB hash table */
#else
#define LG_HPTEG_SIZE	6		/* 64 bytes per HPTEG */
#define SDR1_LOW_BITS	((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG	1024		/* min 64kB hash table */
#endif

#ifdef CONFIG_POWER4
	/* The hash table has already been allocated and initialized
	   in prom.c */
	n_hpteg = Hash_size >> LG_HPTEG_SIZE;
	lg_n_hpteg = __ilog2(n_hpteg);

	/* Remove the hash table from the available memory */
	if (Hash)
		reserve_phys_mem(__pa(Hash), Hash_size);

#else /* CONFIG_POWER4 */
	/*
	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
	 * This is less than the recommended amount, but then
	 * Linux ain't AIX.
	 */
	n_hpteg = total_memory / (PAGE_SIZE * 8);
	if (n_hpteg < MIN_N_HPTEG)
		n_hpteg = MIN_N_HPTEG;
	lg_n_hpteg = __ilog2(n_hpteg);
	if (n_hpteg & (n_hpteg - 1)) {
		++lg_n_hpteg;		/* round up if not power of 2 */
		n_hpteg = 1 << lg_n_hpteg;
	}
	Hash_size = n_hpteg << LG_HPTEG_SIZE;
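
	/*
	 * Worked example (illustrative): with 128MB of RAM and 4kB
	 * pages, n_hpteg = 128MB / (4kB * 8) = 4096, which is already
	 * a power of 2, so lg_n_hpteg = 12 and Hash_size = 4096 << 6
	 * = 256kB.
	 */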

	/*
	 * Find some memory for the hash table.
	 */
	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
	Hash = mem_pieces_find(Hash_size, Hash_size);
	cacheable_memzero(Hash, Hash_size);
	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
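	/*
	 * Passing Hash_size as both arguments to mem_pieces_find()
	 * asks for a piece aligned on a multiple of its own size,
	 * which the SDR1 encoding assumes: the table's physical
	 * address must not overlap the low-order mask bits.
	 * Continuing the 256kB example above, SDR1_LOW_BITS =
	 * (4096 - 1) >> 10 = 3, i.e. two additional hash bits beyond
	 * the minimum 64kB table.
	 */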
	Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
#endif /* CONFIG_POWER4 */

	printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
	       total_memory >> 20, Hash_size >> 10, Hash);

	/*
	 * Patch up the instructions in hashtable.S:create_hpte
	 */
	if ( ppc_md.progress ) ppc_md.progress("hash:patch", 0x345);
	Hash_mask = n_hpteg - 1;
	hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
	mb2 = mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
	if (lg_n_hpteg > 16)
		mb2 = 16 - LG_HPTEG_SIZE;
	hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff)
		| ((unsigned int)(Hash) >> 16);
	hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0) | (mb << 6);
	hash_page_patch_A[2] = (hash_page_patch_A[2] & ~0x7c0) | (mb2 << 6);
	hash_page_patch_B[0] = (hash_page_patch_B[0] & ~0xffff) | hmask;
	hash_page_patch_C[0] = (hash_page_patch_C[0] & ~0xffff) | hmask;
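
	/*
	 * A sketch of what the patching rewrites: the ~0xffff masks
	 * replace an instruction's 16-bit immediate with the upper
	 * half of the table address or with hmask, while the ~0x7c0
	 * masks rewrite the 5-bit mask-begin (MB) field of an rlwinm
	 * (bits 6-10 of the word, hence the << 6).  The net effect is
	 * that the stubs in hashtable.S index a table whose location
	 * and size were only known at boot.
	 */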

	/*
	 * Ensure that the locations we've patched have been written
	 * out from the data cache and invalidated in the instruction
	 * cache, on those machines with split caches.
	 */
	flush_icache_range((unsigned long) &hash_page_patch_A[0],
			   (unsigned long) &hash_page_patch_C[1]);

	/*
	 * Patch up the instructions in hashtable.S:flush_hash_page
	 */
	flush_hash_patch_A[0] = (flush_hash_patch_A[0] & ~0xffff)
		| ((unsigned int)(Hash) >> 16);
	flush_hash_patch_A[1] = (flush_hash_patch_A[1] & ~0x7c0) | (mb << 6);
	flush_hash_patch_A[2] = (flush_hash_patch_A[2] & ~0x7c0) | (mb2 << 6);
	flush_hash_patch_B[0] = (flush_hash_patch_B[0] & ~0xffff) | hmask;
	flush_icache_range((unsigned long) &flush_hash_patch_A[0],
			   (unsigned long) &flush_hash_patch_B[1]);

	if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
	struct mm_struct *mm;
	pmd_t *pmd;
	pte_t *ptep;
	static int nopreload;

	if (Hash == 0 || nopreload)
		return;
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte))
		return;
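	/* Kernel addresses (at or above TASK_SIZE) live in init_mm's
	   page tables; user addresses belong to the faulting task. */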
	mm = (address < TASK_SIZE)? vma->vm_mm: &init_mm;
	pmd = pmd_offset(pgd_offset(mm, address), address);
	if (!pmd_none(*pmd)) {
		ptep = pte_offset(pmd, address);
		add_hash_page(mm->context, address, ptep);
	}
}