/*
 *  linux/arch/unicore32/mm/init.c
 *
 *  Copyright (C) 2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>

#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <mach/map.h>

#include "mm.h"
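/*
 * Default physical address and size of the initial ramdisk.  These are
 * only fallbacks; the "initrd=<start>,<size>" early parameter below
 * overrides them.
 */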
static unsigned long phys_initrd_start __initdata = 0x01000000;
static unsigned long phys_initrd_size __initdata = SZ_8M;
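/* Parse the "initrd=<start>,<size>" kernel command line parameter. */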
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);
/*
 * This keeps memory configuration data used by a couple of memory
 * initialization functions, as well as by show_mem() for skipping
 * holes in the memory map.  It is populated by uc32_add_memory().
 */
struct meminfo meminfo;
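/*
 * Dump a summary of memory usage: the generic free-area statistics
 * followed by a per-bank classification of every page.
 */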
void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo *mi = &meminfo;

	printk(KERN_DEFAULT "Mem-info:\n");
	show_free_areas(filter);

	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
		return;

	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

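		/* Classify each page in the bank for the totals below. */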
		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk(KERN_DEFAULT "%d pages of RAM\n", total);
	printk(KERN_DEFAULT "%d free pages\n", free);
	printk(KERN_DEFAULT "%d reserved pages\n", reserved);
	printk(KERN_DEFAULT "%d slab pages\n", slab);
	printk(KERN_DEFAULT "%d pages shared\n", shared);
	printk(KERN_DEFAULT "%d pages swap cached\n", cached);
}

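/*
 * Find the lowest and highest PFNs spanned by the memory banks.
 * *max_low stops at the end of the last lowmem bank; *max_high also
 * covers highmem banks.
 */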
static void __init find_limits(unsigned long *min, unsigned long *max_low,
	unsigned long *max_high)
{
	struct meminfo *mi = &meminfo;
	int i;

	*min = -1UL;
	*max_low = *max_high = 0;

	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned long start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (*min > start)
			*min = start;
		if (*max_high < end)
			*max_high = end;
		if (bank->highmem)
			continue;
		if (*max_low < end)
			*max_low = end;
	}
}

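/*
 * Bring up the node-0 bootmem allocator over [start_pfn, end_pfn) and
 * seed it from memblock: memory regions are freed into bootmem, while
 * memblock's reserved regions stay reserved.
 */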
static void __init uc32_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}

	/* Reserve the lowmem memblock reserved regions in bootmem. */
	for_each_memblock(reserved, reg) {
		unsigned long start = memblock_region_reserved_base_pfn(reg);
		unsigned long end = memblock_region_reserved_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		reserve_bootmem(__pfn_to_phys(start),
			(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
	}
}

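/*
 * Compute per-zone sizes and hole sizes from the memblock memory
 * regions and initialise the node's free lists via
 * free_area_init_node().
 */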
static void __init uc32_bootmem_free(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
	}

	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	arch_adjust_zones(zone_size, zhole_size);

	free_area_init_node(0, zone_size, min, zhole_size);
}

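/* A PFN is valid iff its physical address lies in a memblock memory region. */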
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);

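/* Placeholder for the sparsemem memory_present() hook; nothing to do here. */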
static void uc32_memory_present(void)
{
}

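/* sort() comparison helper: order memory banks by ascending start PFN. */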
static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

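/*
 * Sort the boot-provided memory banks, register them with memblock,
 * and reserve the kernel image, the initrd and the regions claimed by
 * uc32_mm_memblock_reserve().
 */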
void __init uc32_memblock_init(struct meminfo *mi)
{
	int i;

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]),
		meminfo_cmp, NULL);

	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(_text), _end - _text);

#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	uc32_mm_memblock_reserve();

	memblock_allow_resize();
	memblock_dump_all();
}

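/*
 * Boot-time memory initialisation: find the PFN limits, hand memory
 * over to bootmem, set up sparsemem and the zone free lists, and
 * record the global PFN limits.
 */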
void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	uc32_bootmem_init(min, max_low);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(1);
#endif
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	uc32_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	uc32_bootmem_free(min, max_low, max_high);

	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}

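/*
 * Return the pages in [pfn, end) to the page allocator; returns the
 * number of pages freed.
 */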
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

	return pages;
}

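/* Free the part of the mem_map array backing the PFN range [start_pfn, end_pfn). */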
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn);

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in uc32_memblock_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
	}
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	struct memblock_region *reg;
	int i;

	max_mapnr   = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap(&meminfo);

	totalram_pages += free_all_bootmem();

	reserved_pages = free_pages = 0;

	for_each_bank(i, &meminfo) {
		struct membank *bank = &meminfo.bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			if (PageReserved(page))
				reserved_pages++;
			else if (!page_count(page))
				free_pages++;
			page++;
		} while (page < end);
	}

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system
	 */
	printk(KERN_INFO "Memory:");
	num_physpages = 0;
	for_each_memblock(memory, reg) {
		unsigned long pages = memblock_region_memory_end_pfn(reg) -
			memblock_region_memory_base_pfn(reg);
		num_physpages += pages;
		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		free_pages << (PAGE_SHIFT-10),
		reserved_pages << (PAGE_SHIFT-10),
		totalhigh_pages << (PAGE_SHIFT-10));

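	/* Report the virtual kernel memory layout. */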
	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
		"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
		"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
		"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n",

		VECTORS_BASE, VECTORS_BASE + PAGE_SIZE,
		DIV_ROUND_UP(PAGE_SIZE, SZ_1K),
		VMALLOC_START, VMALLOC_END,
		DIV_ROUND_UP((VMALLOC_END - VMALLOC_START), SZ_1M),
		PAGE_OFFSET, (unsigned long)high_memory,
		DIV_ROUND_UP(((unsigned long)high_memory - PAGE_OFFSET), SZ_1M),
		MODULES_VADDR, MODULES_END,
		DIV_ROUND_UP((MODULES_END - MODULES_VADDR), SZ_1M),

		__init_begin, __init_end,
		DIV_ROUND_UP((__init_end - __init_begin), SZ_1K),
		_stext, _etext,
		DIV_ROUND_UP((_etext - _stext), SZ_1K),
		_sdata, _edata,
		DIV_ROUND_UP((_edata - _sdata), SZ_1K));

	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
	BUG_ON(TASK_SIZE				> MODULES_VADDR);

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

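/* Release the kernel's __init text and data now that boot is complete. */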
void free_initmem(void)
{
	totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
				    __phys_to_pfn(__pa(__init_end)),
				    "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

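/* Free the initrd pages unless "keepinitrd" was given on the command line. */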
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
					    __phys_to_pfn(__pa(end)),
					    "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif