/*
 *  linux/arch/i386/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/slab.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>

mmu_gather_t mmu_gathers[NR_CPUS];
unsigned long highstart_pfn, highend_pfn;
static unsigned long totalram_pages;
static unsigned long totalhigh_pages;

int do_check_pgt_cache(int low, int high)
{
	int freed = 0;
	if (pgtable_cache_size > high) {
		do {
			if (pgd_quicklist) {
				free_pgd_slow(get_pgd_fast());
				freed++;
			}
			if (pmd_quicklist) {
				pmd_free_slow(pmd_alloc_one_fast(NULL, 0));
				freed++;
			}
			if (pte_quicklist) {
				pte_free_slow(pte_alloc_one_fast(NULL, 0));
				freed++;
			}
		} while (pgtable_cache_size > low);
	}
	return freed;
}
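/*
 * Trimming works by "allocating" from the head of each quicklist with
 * the _fast() helpers (which only ever touch the cache) and then
 * really freeing the result with the _slow() helpers, so each round
 * trip shrinks the cache by one page.  This is typically driven from
 * kswapd with the pgt_cache_water[] low/high watermarks, e.g.
 * (illustrative only):
 *
 *	do_check_pgt_cache(25, 50);
 *
 * which, once the cache has grown past 50 pages, frees pages until at
 * most 25 remain cached.
 */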

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables
 * contiguously in physical space, so we can cache the location of the
 * first one and move around without checking the pgd every time.
 */

#if CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr)					\
	pte_offset(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}
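/*
 * The cached kmap_pte is what makes kmap_atomic() cheap: because the
 * FIX_KMAP_* ptes sit in one physically contiguous pte page, an
 * atomic kmap can index from kmap_pte directly instead of walking the
 * pgd.  Roughly (a sketch of the <asm/highmem.h> fast path, not code
 * from this file):
 *
 *	idx   = type + KM_TYPE_NR * smp_processor_id();
 *	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 *	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
 *	__flush_tlb_one(vaddr);
 *
 * (kmap_pte - idx, because fixmap addresses grow downwards.)
 */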
#endif /* CONFIG_HIGHMEM */

void show_mem(void)
{
	int i, total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6dkB\n", nr_swap_pages << (PAGE_SHIFT-10));
	i = max_mapnr;
	while (i-- > 0) {
		total++;
		if (PageHighMem(mem_map+i))
			highmem++;
		if (PageReserved(mem_map+i))
			reserved++;
		else if (PageSwapCache(mem_map+i))
			cached++;
		else if (page_count(mem_map+i))
			shared += page_count(mem_map+i) - 1;
	}
	printk("%d pages of RAM\n", total);
	printk("%d pages of HIGHMEM\n", highmem);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
	printk("%ld pages in page table cache\n", pgtable_cache_size);
	show_buffers();
}

/* References to section boundaries */

extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;

static inline void set_pte_phys(unsigned long vaddr,
			unsigned long phys, pgprot_t flags)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + __pgd_offset(vaddr);
	if (pgd_none(*pgd)) {
		printk("PAE BUG #00!\n");
		return;
	}
	pmd = pmd_offset(pgd, vaddr);
	if (pmd_none(*pmd)) {
		printk("PAE BUG #01!\n");
		return;
	}
	pte = pte_offset(pmd, vaddr);
	/* <phys,flags> stored as-is, to permit clearing entries */
	set_pte(pte, mk_pte_phys(phys, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		printk("Invalid __set_fixmap\n");
		return;
	}
	set_pte_phys(address, phys, flags);
}
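/*
 * Callers normally go through the set_fixmap()/set_fixmap_nocache()
 * wrappers from <asm/fixmap.h>, which supply PAGE_KERNEL or
 * PAGE_KERNEL_NOCACHE as the pgprot.  For example (illustrative,
 * paraphrasing the local-APIC setup code):
 *
 *	set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
 *
 * maps the APIC register page uncached at a compile-time-constant
 * virtual address.
 */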

static void __init fixrange_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
#if CONFIG_X86_PAE
		if (pgd_none(*pgd)) {
			pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
			set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
			if (pmd != pmd_offset(pgd, 0))
				printk("PAE BUG #02!\n");
		}
		pmd = pmd_offset(pgd, vaddr);
#else
		pmd = (pmd_t *)pgd;
#endif
		for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
			if (pmd_none(*pmd)) {
				pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
				set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
				if (pte != pte_offset(pmd, 0))
					BUG();
			}
			vaddr += PMD_SIZE;
		}
		j = 0;
	}
}
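/*
 * Note that fixrange_init() only builds the pgd/pmd/pte skeleton for
 * [start, end): the pte pages come back zeroed from the bootmem
 * allocator, so no virtual address in the range actually maps
 * anything yet.  The real entries are written later, one at a time,
 * by set_fixmap() (via set_pte_phys() above) and by the
 * kmap_atomic()/kmap() machinery.
 */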

static void __init pagetable_init(void)
{
	unsigned long vaddr, end;
	pgd_t *pgd, *pgd_base;
	int i, j, k;
	pmd_t *pmd;
	pte_t *pte, *pte_base;

	/*
	 * This can be zero as well - no problem, in that case we exit
	 * the loops anyway due to the PTRS_PER_* conditions.
	 */
	end = (unsigned long)__va(max_low_pfn*PAGE_SIZE);

	pgd_base = swapper_pg_dir;
#if CONFIG_X86_PAE
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(pgd_base + i, __pgd(1 + __pa(empty_zero_page)));
#endif
	i = __pgd_offset(PAGE_OFFSET);
	pgd = pgd_base + i;

	for (; i < PTRS_PER_PGD; pgd++, i++) {
		vaddr = i*PGDIR_SIZE;
		if (end && (vaddr >= end))
			break;
#if CONFIG_X86_PAE
		pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
		set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
#else
		pmd = (pmd_t *)pgd;
#endif
		if (pmd != pmd_offset(pgd, 0))
			BUG();
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++) {
			vaddr = i*PGDIR_SIZE + j*PMD_SIZE;
			if (end && (vaddr >= end))
				break;
			if (cpu_has_pse) {
				unsigned long __pe;

				set_in_cr4(X86_CR4_PSE);
				boot_cpu_data.wp_works_ok = 1;
				__pe = _KERNPG_TABLE + _PAGE_PSE + __pa(vaddr);
				/* Make it "global" too if supported */
				if (cpu_has_pge) {
					set_in_cr4(X86_CR4_PGE);
					__pe += _PAGE_GLOBAL;
				}
				set_pmd(pmd, __pmd(__pe));
				continue;
			}

			pte_base = pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);

			for (k = 0; k < PTRS_PER_PTE; pte++, k++) {
				vaddr = i*PGDIR_SIZE + j*PMD_SIZE + k*PAGE_SIZE;
				if (end && (vaddr >= end))
					break;
				*pte = mk_pte_phys(__pa(vaddr), PAGE_KERNEL);
			}
			set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte_base)));
			if (pte_base != pte_offset(pmd, 0))
				BUG();
		}
	}

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, 0, pgd_base);

#if CONFIG_HIGHMEM
	/*
	 * Permanent kmaps:
	 */
	vaddr = PKMAP_BASE;
	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + __pgd_offset(vaddr);
	pmd = pmd_offset(pgd, vaddr);
	pte = pte_offset(pmd, vaddr);
	pkmap_page_table = pte;
#endif

#if CONFIG_X86_PAE
	/*
	 * Add low memory identity-mappings - SMP needs it when
	 * starting up on an AP from real-mode. In the non-PAE
	 * case we already have these mappings through head.S.
	 * All user-space mappings are explicitly cleared after
	 * SMP startup.
	 */
	pgd_base[0] = pgd_base[USER_PTRS_PER_PGD];
#endif
}
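/*
 * Worked example of the PSE fast path above, assuming the usual
 * PAGE_OFFSET of 0xC0000000 (illustrative numbers): for the pmd slot
 * covering vaddr 0xC0400000, __pa(vaddr) is 0x00400000, so the pmd is
 * loaded with 0x00400000 | _PAGE_PSE | _KERNPG_TABLE (plus
 * _PAGE_GLOBAL on PGE-capable CPUs).  One such 4MB entry replaces an
 * entire pte page of 1024 4K mappings, saving page-table memory and
 * TLB entries for the kernel's linear mapping.
 */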

void __init zap_low_mappings(void)
{
	int i;
	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < USER_PTRS_PER_PGD; i++)
#if CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	flush_tlb_all();
}

static void __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
	unsigned int max_dma, high, low;

	max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	low = max_low_pfn;
	high = highend_pfn;

	if (low < max_dma)
		zones_size[ZONE_DMA] = low;
	else {
		zones_size[ZONE_DMA] = max_dma;
		zones_size[ZONE_NORMAL] = low - max_dma;
#ifdef CONFIG_HIGHMEM
		zones_size[ZONE_HIGHMEM] = high - low;
#endif
	}
	free_area_init(zones_size);
}
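/*
 * Worked example (illustrative): on a 128MB box, max_dma is 4096 pfns
 * (the 16MB ISA DMA limit) and max_low_pfn is 32768, so the zones
 * come out as ZONE_DMA = 4096 pages (0-16MB), ZONE_NORMAL = 28672
 * pages (16-128MB) and ZONE_HIGHMEM = 0.  Only machines with more RAM
 * than the kernel can map directly (around 896MB with the default
 * PAGE_OFFSET) get a non-empty ZONE_HIGHMEM.
 */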

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	load_cr3(swapper_pg_dir);

#if CONFIG_X86_PAE
	/*
	 * We will bail out later - printk doesn't work right now so
	 * the user would just see a hanging kernel.
	 */
	if (cpu_has_pae)
		set_in_cr4(X86_CR4_PAE);
#endif

	__flush_tlb_all();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. The jumps
 * before and after the test are here to work around some nasty CPU bugs.
 */

/*
 * This function cannot be __init, since exceptions don't work in that
 * section.
 */
static int __attribute__((noinline)) do_test_wp_bit(unsigned long vaddr);

void __init test_wp_bit(void)
{
/*
 * Ok, all PSE-capable CPUs are definitely handling the WP bit right.
 */
	const unsigned long vaddr = PAGE_OFFSET;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte, old_pte;

	printk("Checking if this processor honours the WP bit even in supervisor mode... ");

	pgd = swapper_pg_dir + __pgd_offset(vaddr);
	pmd = pmd_offset(pgd, vaddr);
	pte = pte_offset(pmd, vaddr);
	old_pte = *pte;
	*pte = mk_pte_phys(0, PAGE_READONLY);
	local_flush_tlb();

	boot_cpu_data.wp_works_ok = do_test_wp_bit(vaddr);

	*pte = old_pte;
	local_flush_tlb();

	if (!boot_cpu_data.wp_works_ok) {
		printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic("This kernel doesn't support CPUs with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk("Ok.\n");
	}
}

static inline int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		unsigned long addr, end;

		if (e820.map[i].type != E820_RAM)	/* not usable memory */
			continue;
		/*
		 *	!!!FIXME!!! Some BIOSen report areas as RAM that
		 *	are not. Notably the 640K->1MB area. We need a
		 *	sanity check here.
		 */
		addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

static inline int page_kills_ppro(unsigned long pagenr)
{
	if (pagenr >= 0x70000 && pagenr <= 0x7003F)
		return 1;
	return 0;
}
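/*
 * The magic range above is in page-frame numbers: pfn 0x70000 is
 * physical address 0x70000000 (pfn << PAGE_SHIFT), so this covers the
 * 64 pages (256KB) at 0x70000000-0x7003FFFF, a region that reportedly
 * triggers a memory erratum on some Pentium Pro steppings.  See
 * ppro_with_ram_bug() in the CPU setup code, which decides whether
 * the workaround is needed at all.
 */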

#ifdef CONFIG_HIGHMEM
void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
	if (!page_is_ram(pfn)) {
		SetPageReserved(page);
		return;
	}

	if (bad_ppro && page_kills_ppro(pfn)) {
		SetPageReserved(page);
		return;
	}

	ClearPageReserved(page);
	set_bit(PG_highmem, &page->flags);
	atomic_set(&page->count, 1);
	__free_page(page);
	totalhigh_pages++;
}
#endif /* CONFIG_HIGHMEM */

static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
	highmem_start_page = mem_map + highstart_pfn;
	max_mapnr = num_physpages = highend_pfn;
	num_mappedpages = max_low_pfn;
#else
	max_mapnr = num_mappedpages = num_physpages = max_low_pfn;
#endif
}

static int __init free_pages_init(void)
{
	extern int ppro_with_ram_bug(void);
	int bad_ppro, reservedpages, pfn;

	bad_ppro = ppro_with_ram_bug();

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (pfn = 0; pfn < max_low_pfn; pfn++) {
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(pfn) && PageReserved(mem_map+pfn))
			reservedpages++;
	}
#ifdef CONFIG_HIGHMEM
	for (pfn = highend_pfn-1; pfn >= highstart_pfn; pfn--)
		one_highpage_init((struct page *) (mem_map + pfn), pfn, bad_ppro);
	totalram_pages += totalhigh_pages;
#endif
	return reservedpages;
}

void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;

	if (!mem_map)
		BUG();
#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
		printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
		printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
				PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
		BUG();
	}
#endif
	set_max_mapnr_init();

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	reservedpages = free_pages_init();

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		max_mapnr << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

#if CONFIG_X86_PAE
	if (!cpu_has_pae)
		panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	/*
	 * Subtle. SMP is doing its boot stuff late (because it has to
	 * fork idle threads) - but it also needs low mappings for the
	 * protected-mode entry to work. We zap these entries only after
	 * the WP-bit has been tested.
	 */
#ifndef CONFIG_SMP
	zap_low_mappings();
#endif
}

/* This function must not be inlined */
static int __attribute__((noinline)) do_test_wp_bit(unsigned long vaddr)
{
	char tmp_reg;
	int flag;

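	/*
	 * How this works: "flag" is preloaded with 1 (the "2" (1) input
	 * constraint below).  The movb at label 1: writes a byte back
	 * to the read-only page at vaddr.  If the CPU honours WP in
	 * supervisor mode, that write faults and the __ex_table fixup
	 * resumes execution at label 2:, skipping the xorl that would
	 * have cleared "flag" - so we return 1.  If the write silently
	 * succeeds, the xorl runs and we return 0.
	 */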
	__asm__ __volatile__(
		"	movb %0,%1	\n"
		"1:	movb %1,%0	\n"
		"	xorl %2,%2	\n"
		"2:			\n"
		".section __ex_table,\"a\"\n"
		"	.align 4	\n"
		"	.long 1b,2b	\n"
		".previous		\n"
		:"=m" (*(char *) vaddr),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		set_page_count(virt_to_page(addr), 1);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		set_page_count(virt_to_page(start), 1);
		free_page(start);
		totalram_pages++;
	}
}
#endif

void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = 0;
	val->freeram = nr_free_pages();
	val->bufferram = atomic_read(&buffermem_pages);
	val->totalhigh = totalhigh_pages;
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}

#if defined(CONFIG_X86_PAE)
struct kmem_cache_s *pae_pgd_cachep;
void __init pgtable_cache_init(void)
{
	/*
	 * PAE pgds must be 16-byte aligned.  A PAE pgd is just the four
	 * 8-byte pdpte entries (32 bytes total), hence the object size
	 * below; the HWCACHE_ALIGN flags force cache-line alignment,
	 * which is at least 16 bytes on i386.
	 */
	pae_pgd_cachep = kmem_cache_create("pae_pgd", 32, 0,
		SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
	if (!pae_pgd_cachep)
		panic("init_pae(): Cannot alloc pae_pgd SLAB cache");
}
#endif /* CONFIG_X86_PAE */