/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/machdep.h>

#include "mmu_decl.h"

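/*
 * ioremap_bot is the lowest virtual address handed out so far for
 * early (pre-vmalloc) ioremap(); it grows downward from ioremap_base.
 * io_bat_index counts the BATs already used by io_block_mapping().
 */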
unsigned long ioremap_base;
unsigned long ioremap_bot;
int io_bat_index;

/* The maximum lowmem defaults to 768Mb, but this can be configured to
 * another value.  On SMP, this value will be trimmed down to whatever
 * can be covered by BATs.
 */
#define MAX_LOW_MEM	CONFIG_LOWMEM_SIZE

#ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists;
#endif

#if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
#define HAVE_BATS	1
#endif

#ifdef HAVE_BATS
static unsigned long __bat2, __bat3;
#endif

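/* Kernel text boundaries; mapin_ram() uses them to keep the text read-only. */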
extern char etext[], _stext[];

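/*
 * v_mapped_by_bats()/p_mapped_by_bats() report whether a virtual or
 * physical address is already covered by a BAT mapping, returning the
 * corresponding translation or 0 if it is not mapped.  Without BAT
 * support they collapse to "never mapped".
 */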
#ifdef HAVE_BATS
extern unsigned long v_mapped_by_bats(unsigned long va);
extern unsigned long p_mapped_by_bats(unsigned long pa);
void setbat(int index, unsigned long virt, unsigned long phys,
	    unsigned int size, int flags);

#else /* !HAVE_BATS */
#define v_mapped_by_bats(x)	(0UL)
#define p_mapped_by_bats(x)	(0UL)
#endif /* HAVE_BATS */

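/*
 * With 64-bit PTEs the physical address space can be wider than 32 bits,
 * so ioremap() first runs the address through fixup_bigphys_addr() and
 * then hands the (possibly adjusted) address to ioremap64().
 */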
#ifdef CONFIG_PTE_64BIT
void *
ioremap(phys_addr_t addr, unsigned long size)
{
	phys_addr_t addr64 = fixup_bigphys_addr(addr, size);

	return ioremap64(addr64, size);
}

void *
ioremap64(unsigned long long addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}

#else /* !CONFIG_PTE_64BIT */

void *
ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
#endif /* CONFIG_PTE_64BIT */

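/*
 * Map a physical range into kernel virtual space with the given
 * protection flags; ioremap() above is the common uncached case.
 * Illustrative use only (the address and offset below are placeholders,
 * not real values):
 *
 *	void *regs = ioremap(0xf0001000, 0x1000);
 *	if (regs) {
 *		u32 val = in_le32(regs + 0x10);
 *		...
 *		iounmap(regs);
 *	}
 */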
void *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * If the address lies within the first 16 MB, assume it's in ISA
	 * memory space
	 */
	if (p < 16*1024*1024)
		p += _ISA_MEM_BASE;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 */
	if ( mem_init_done && (p < virt_to_phys(high_memory)) )
	{
		printk("__ioremap(): phys addr "PTE_FMT" is RAM lr %p\n", p,
		       __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped?  Perhaps overlapped by a previous
	 * BAT mapping.  If the whole area is mapped then we're done,
	 * otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */
	if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ )
		goto out;

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == 0)
			return NULL;
		v = VMALLOC_VMADDR(area->addr);
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	/*
	 * Should check if it is a candidate for a BAT mapping
	 */

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v+i, p+i, flags);
	if (err) {
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

out:
	return (void *) (v + ((unsigned long)addr & ~PAGE_MASK));
}

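/*
 * Undo a mapping created by ioremap()/__ioremap().  Only mappings that
 * went through vmalloc need any work; BAT-covered and early
 * (pre-vmalloc) mappings are simply left in place.
 */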
void iounmap(void *addr)
{
	/*
	 * If mapped by BATs then there is nothing to do.
	 * Calling vfree() generates a benign warning.
	 */
	if (v_mapped_by_bats((unsigned long)addr)) return;

	if (addr > high_memory && (unsigned long) addr < ioremap_bot)
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}

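/*
 * Insert a single page translation into the kernel page tables,
 * allocating a PTE page if the second-level table does not exist yet.
 * Returns 0 on success or -ENOMEM if the PTE page cannot be allocated.
 */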
int
map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc(&init_mm, pd, va);
	if (pg != 0) {
		err = 0;
		set_pte(pg, mk_pte_phys(pa & PAGE_MASK, __pgprot(flags)));
		if (mem_init_done)
			flush_HPTE(0, va, pg);
	}
	spin_unlock(&init_mm.page_table_lock);
	return err;
}

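/*
 * Work out how much lowmem can be covered by BAT mappings and trim
 * total_lowmem accordingly.  Two BATs (BAT2 and BAT3) are used for RAM;
 * each covers a power-of-two block of at most 256MB (8MB on the 601).
 * For example, assuming a non-601 CPU and a suitably aligned
 * PPC_MEMSTART, 768MB of lowmem yields BAT2 = BAT3 = 256MB with a
 * 256MB residual reachable only through the page tables.
 */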
void __init
adjust_total_lowmem(void)
{
	unsigned long max_low_mem = MAX_LOW_MEM;

#ifdef HAVE_BATS
	unsigned long bat_max = 0x10000000;
	unsigned long align;
	unsigned long ram;
	int is601 = 0;

	/* 601s have smaller BATs */
	if (PVR_VER(mfspr(PVR)) == 1) {
		bat_max = 0x00800000;
		is601 = 1;
	}

	/* adjust BAT block size to max_low_mem */
	if (max_low_mem < bat_max)
		bat_max = max_low_mem;

	/* adjust lowmem size to max_low_mem */
	if (max_low_mem < total_lowmem)
		ram = max_low_mem;
	else
		ram = total_lowmem;

	/* Make sure we don't map a block larger than the
	   smallest alignment of the physical address. */
	/* alignment of PPC_MEMSTART */
	align = ~(PPC_MEMSTART-1) & PPC_MEMSTART;
	/* set BAT block size to MIN(max_size, align) */
	if (align && align < bat_max)
		bat_max = align;

	/* Calculate BAT values */
	__bat2 = 1UL << __ilog2(ram);
	if (__bat2 > bat_max)
		__bat2 = bat_max;
	ram -= __bat2;
	if (ram) {
		__bat3 = 1UL << __ilog2(ram);
		if (__bat3 > bat_max)
			__bat3 = bat_max;
		ram -= __bat3;
	}

	printk(KERN_INFO "Memory BAT mapping: BAT2=%ldMb, BAT3=%ldMb,"
			" residual: %ldMb\n", __bat2 >> 20, __bat3 >> 20,
			(total_lowmem - (__bat2 + __bat3)) >> 20);

	/* On SMP, we limit the lowmem to the area mapped with BATs.
	 * We also assume nobody will do SMP with 601s
	 */
#ifdef CONFIG_SMP
	if (!is601)
		max_low_mem = __bat2 + __bat3;
#endif /* CONFIG_SMP */

#endif /* HAVE_BATS */
	if (total_lowmem > max_low_mem) {
		total_lowmem = max_low_mem;
#ifndef CONFIG_HIGHMEM
		printk(KERN_INFO "Warning, memory limited to %ld Mb, use "
				"CONFIG_HIGHMEM to reach %ld Mb\n",
				max_low_mem >> 20, total_memory >> 20);
		total_memory = total_lowmem;
#endif /* CONFIG_HIGHMEM */
	}
}

/*
 * Map in all of physical memory starting at KERNELBASE.
 */
void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

#ifdef HAVE_BATS
	if (!__map_without_bats)
		bat_mapin_ram(__bat2, __bat3);
#endif /* HAVE_BATS */

	v = KERNELBASE;
	p = PPC_MEMSTART;
	for (s = 0; s < total_lowmem; s += PAGE_SIZE) {
		/* On the MPC8xx, we want the page shared so we
		 * don't get ASID compares on kernel space.
		 */
		f = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SHARED | _PAGE_HWEXEC;
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH)
		/* Allows stub to set breakpoints everywhere */
		f |= _PAGE_WRENABLE;
#else	/* !CONFIG_KGDB && !CONFIG_XMON && !CONFIG_BDI_SWITCH */
		if ((char *) v < _stext || (char *) v >= etext)
			f |= _PAGE_WRENABLE;
#ifdef CONFIG_PPC_STD_MMU
		else
			/* On the standard PPC MMU, a page with no user
			   access is always kernel R/W; _PAGE_USER is
			   needed here to keep the kernel text read-only */
			f |= _PAGE_USER;
#endif /* CONFIG_PPC_STD_MMU */
#endif /* CONFIG_KGDB || CONFIG_XMON || CONFIG_BDI_SWITCH */
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
	if (ppc_md.progress)
		ppc_md.progress("MMU:mapin_ram done", 0x401);
}

/* is x a power of 2? */
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))

/*
 * Set up a mapping for a block of I/O.
 * virt, phys, size must all be page-aligned.
 * This should only be called before ioremap is called.
 */
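/*
 * Example (illustrative values only): a platform setup routine could
 * pre-map a 16MB I/O window with
 *
 *	io_block_mapping(0xfe000000, 0xfe000000, 0x01000000,
 *			 _PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED);
 *
 * A power-of-two size with suitably aligned addresses lets a spare BAT
 * cover the whole block; otherwise the region is mapped page by page.
 */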
void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
			     unsigned int size, int flags)
{
	int i;

	if (virt > KERNELBASE && virt < ioremap_bot)
		ioremap_bot = ioremap_base = virt;

#ifdef HAVE_BATS
	/*
	 * Use a BAT for this if possible...
	 */
	if (io_bat_index < 2 && is_power_of_2(size)
	    && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
		setbat(io_bat_index, virt, phys, size, flags);
		++io_bat_index;
		return;
	}
#endif /* HAVE_BATS */

	/* No BATs available, put it in the page tables. */
	for (i = 0; i < size; i += PAGE_SIZE)
		map_page(virt + i, phys + i, flags);
}

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if a PTE was found, zero otherwise.  The PTE
 * pointer (*ptep) is left unmodified if no PTE is found.
 */
int
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
        pgd_t	*pgd;
        pmd_t	*pmd;
        pte_t	*pte;
        int     retval = 0;

        pgd = pgd_offset(mm, addr & PAGE_MASK);
        if (pgd) {
                pmd = pmd_offset(pgd, addr & PAGE_MASK);
                if (pmd_present(*pmd)) {
                        pte = pte_offset(pmd, addr & PAGE_MASK);
                        if (pte) {
				retval = 1;
				*ptep = pte;
                        }
                }
        }
        return(retval);
}

/* Find the physical address for this virtual address.  Normally used by
 * I/O functions, but anyone can call it.
 */
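/*
 * Note: iopa() returns 0 when no translation is found, which is not
 * distinguishable from a genuine physical address of 0.
 */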
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;

	/* I don't know why this won't work on PMacs or CHRP.  It
	 * appears there is some bug, or there is some implicit
	 * mapping done not properly represented by BATs or in page
	 * tables.......I am actively working on resolving this, but
	 * can't hold up other stuff.  -- Dan
	 */
	pte_t *pte;
	struct mm_struct *mm;

	/* Check the BATs */
	pa = v_mapped_by_bats(addr);
	if (pa)
		return pa;

	/* Allow mapping of user addresses (within the thread)
	 * for DMA if necessary.
	 */
	if (addr < TASK_SIZE)
		mm = current->mm;
	else
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte))
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);

	return(pa);
}

/* This will find the virtual address for a physical one....
 * Swiped from APUS, could be dangerous :-).
 * This is only a placeholder until I really find a way to make this
 * work.  -- Dan
 */
unsigned long
mm_ptov(unsigned long paddr)
{
	unsigned long ret;
#if 0
	if (paddr < 16*1024*1024)
		ret = ZTWO_VADDR(paddr);
	else {
		int i;

		for (i = 0; i < kmap_chunk_count;){
			unsigned long phys = kmap_chunks[i++];
			unsigned long size = kmap_chunks[i++];
			unsigned long virt = kmap_chunks[i++];
			if (paddr >= phys
			    && paddr < (phys + size)){
				ret = virt + paddr - phys;
				goto exit;
			}
		}

		ret = (unsigned long) __va(paddr);
	}
exit:
#ifdef DEBUGPV
	printk ("PTOV(%lx)=%lx\n", paddr, ret);
#endif
#else
	ret = (unsigned long)paddr + KERNELBASE;
#endif
	return ret;
}
