/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040/060 this is _PAGE_CACHE040 (cacheable, copyback).
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

/* size of memory already mapped in head.S */
#define INIT_MAPPED_SIZE	(4UL<<20)

extern unsigned long availmem;

static pte_t * __init kernel_page_table(void)
{
	pte_t *ptablep;

	ptablep = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

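	/*
	 * Zero the new page table and make sure the MMU never sees a stale
	 * cached copy of it: write the page back to RAM, drop its TLB entry
	 * and map it noncacheable (nocache_page() only takes effect on the
	 * '040/'060, whose hardware table walks fetch descriptors from RAM).
	 */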
	clear_page(ptablep);
	__flush_page_to_ram(ptablep);
	flush_tlb_kernel_page(ptablep);
	nocache_page(ptablep);

	return ptablep;
}

static pmd_t *last_pgtable __initdata = NULL;
pmd_t *zero_pgtable __initdata = NULL;

static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pgtable) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (!pgd_present(kernel_pg_dir[i]))
				continue;
			pmd = __pgd_page(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pgtable = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pgtable);
#endif
	}

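	/*
	 * A pointer table is only PTRS_PER_PMD entries, a fraction of a
	 * page, so several of them share one page: bump the allocation
	 * pointer and only grab (and nocache) a fresh page once it wraps
	 * onto a page boundary.
	 */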
	last_pgtable += PTRS_PER_PMD;
	if (((unsigned long)last_pgtable & ~PAGE_MASK) == 0) {
		last_pgtable = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

		clear_page(last_pgtable);
		__flush_page_to_ram(last_pgtable);
		flush_tlb_kernel_page(last_pgtable);
		nocache_page(last_pgtable);
	}

	return last_pgtable;
}

static void __init map_node(int node)
{
#define PTRTREESIZE (256*1024)
#define ROOTTREESIZE (32*1024*1024)
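	/*
	 * On the '020/'030 a root table entry covers 32MB (ROOTTREESIZE)
	 * and a pointer table entry 256KB (PTRTREESIZE) of address space;
	 * either can be turned into an "early termination" descriptor that
	 * maps its whole range contiguously without lower-level tables.
	 */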
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

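	/*
	 * Map the chunk in the largest steps the CPU allows: whole root or
	 * pointer table entries on the '020/'030, individual pages through
	 * full page tables on the '040/'060.
	 */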
	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (ROOTTREESIZE-1)) &&
			    size >= ROOTTREESIZE) {
#ifdef DEBUG
				printk ("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= ROOTTREESIZE;
				virtaddr += ROOTTREESIZE;
				physaddr += ROOTTREESIZE;
				continue;
			}
		}
		if (!pgd_present(*pgd_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk ("[new pointer %p]", pmd_dir);
#endif
			pgd_set(pgd_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pgd_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk ("[early term]");
#endif
				pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
				physaddr += PTRTREESIZE;
			} else {
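				/*
				 * Virtual address 0: use a real page table
				 * (zero_pgtable) and leave its first entry
				 * invalid, so that page 0 stays unmapped and
				 * NULL dereferences fault.
				 */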
				int i;
#ifdef DEBUG
				printk ("[zero map]");
#endif
				zero_pgtable = kernel_ptr_table();
				pte_dir = (pte_t *)zero_pgtable;
				pmd_dir->pmd[0] = virt_to_phys(pte_dir) |
					_PAGE_TABLE | _PAGE_ACCESSED;
				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < 64; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PTRTREESIZE;
			virtaddr += PTRTREESIZE;
		} else {
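			/*
			 * '040/'060: map the chunk with individual page
			 * table entries, allocating page tables as needed.
			 */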
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}

	}
#ifdef DEBUG
	printk("\n");
#endif
}

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr, size, end;
	int i;

#ifdef DEBUG
	printk ("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0.  */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

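	/*
	 * Work out the lowest and highest physical addresses of usable RAM;
	 * chunks below the first one cannot be used by this scheme and are
	 * dropped with a warning.
	 */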
	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct mem_info));
			continue;
		}
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_low_pfn = max_addr >> PAGE_SHIFT;

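	/*
	 * Give each node its own bootmem allocator; the bootmem bitmaps are
	 * carved out of availmem, the first free address left by head.S.
	 */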
	for (i = 0; i < m68k_num_memory; i++) {
		addr = m68k_memory[i].addr;
		end = addr + m68k_memory[i].size;
		m68k_setup_node(i);
		availmem = PAGE_ALIGN(availmem);
		availmem += init_bootmem_node(NODE_DATA(i),
					      availmem >> PAGE_SHIFT,
					      addr >> PAGE_SHIFT,
					      end >> PAGE_SHIFT);
	}

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. First initialize the bootmem allocator with
	 * the memory we already mapped, so map_node() has something
	 * to allocate.
	 */
	addr = m68k_memory[0].addr;
	size = m68k_memory[0].size;
	free_bootmem_node(NODE_DATA(0), availmem, min(INIT_MAPPED_SIZE, size) - (availmem - addr));
	map_node(0);
	if (size > INIT_MAPPED_SIZE)
		free_bootmem_node(NODE_DATA(0), addr + INIT_MAPPED_SIZE, size - INIT_MAPPED_SIZE);

	for (i = 1; i < m68k_num_memory; i++)
		map_node(i);

	flush_tlb_all();

	/*
	 * Allocate the empty zero page, kept zero-filled and handed out
	 * read-only wherever a page of zeroes is needed.
	 */
	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
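	/* Every chunk's pages all go into ZONE_DMA, the only zone populated here. */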
	for (i = 0; i < m68k_num_memory; i++) {
		zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT;
		free_area_init_node(i, zones_size,
				    m68k_memory[i].addr >> PAGE_SHIFT, NULL);
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);
	}
}

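/*
 * Give the pages holding the .init sections back to the page allocator:
 * clear PG_reserved, reset each page's use count and free it.
 */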
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		virt_to_page(addr)->flags &= ~(1 << PG_reserved);
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
}