1 /*
2 * linux/arch/alpha/mm/init.c
3 *
4 * Copyright (C) 1995 Linus Torvalds
5 */
6
7 /* 2.3.x zone allocator, 1999 Andrea Arcangeli <andrea@suse.de> */
8
9 #include <linux/config.h>
10 #include <linux/signal.h>
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
13 #include <linux/errno.h>
14 #include <linux/string.h>
15 #include <linux/types.h>
16 #include <linux/ptrace.h>
17 #include <linux/mman.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/init.h>
21 #include <linux/bootmem.h> /* max_low_pfn */
22 #include <linux/vmalloc.h>
23 #ifdef CONFIG_BLK_DEV_INITRD
24 #include <linux/blk.h>
25 #endif
26
27 #include <asm/system.h>
28 #include <asm/uaccess.h>
29 #include <asm/pgtable.h>
30 #include <asm/pgalloc.h>
31 #include <asm/hwrpb.h>
32 #include <asm/dma.h>
33 #include <asm/mmu_context.h>
34 #include <asm/console.h>
35 #include <asm/tlb.h>
36
/* Per-CPU state for the generic TLB-shootdown helpers in <asm/tlb.h>. */
mmu_gather_t mmu_gathers[NR_CPUS];

/* Total RAM pages handed to the page allocator; grown by mem_init()
   and free_reserved_mem() below, reported via si_meminfo(). */
unsigned long totalram_pages;

extern void die_if_kernel(char *,struct pt_regs *,long);

/* Copy of the PCB the console/bootloader was running with, saved by
   switch_to_system_map() so srm_paging_stop() can restore it for a
   clean return to the SRM console. */
struct thread_struct original_pcb;

#ifndef CONFIG_SMP
/* Page-table quicklist cache; on SMP this is per-CPU and declared
   elsewhere, so only the UP instance lives here. */
struct pgtable_cache_struct quicklists;
#endif
48
/*
 * Allocate and initialize a new level-1 (PGD) page table page.
 *
 * The new table starts out clear, inherits the kernel's vmalloc
 * mapping(s) from init_mm, and gets its own VPTB self-map entry in
 * the final slot.  Returns NULL if no page was available.
 */
pgd_t *
get_pgd_slow(void)
{
	pgd_t *ret, *init;

	ret = (pgd_t *)__get_free_page(GFP_KERNEL);
	init = pgd_offset(&init_mm, 0UL);
	if (ret) {
		clear_page(ret);
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
		/* Copy every kernel L1 slot from the master table except
		   the last one, which is rewritten as the self-map below. */
		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t));
#else
		/* Small vmalloc space: a single shared kernel L1 slot. */
		pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]);
#endif

		/* The last PGD entry is the VPTB self-map. */
		pgd_val(ret[PTRS_PER_PGD-1])
			= pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
	}
	return ret;
}
71
do_check_pgt_cache(int low,int high)72 int do_check_pgt_cache(int low, int high)
73 {
74 int freed = 0;
75 if(pgtable_cache_size > high) {
76 do {
77 if(pgd_quicklist) {
78 free_pgd_slow(get_pgd_fast());
79 freed++;
80 }
81 if(pmd_quicklist) {
82 pmd_free_slow(pmd_alloc_one_fast(NULL, 0));
83 freed++;
84 }
85 if(pte_quicklist) {
86 pte_free_slow(pte_alloc_one_fast(NULL, 0));
87 freed++;
88 }
89 } while(pgtable_cache_size > low);
90 }
91 return freed;
92 }
93
94 /*
95 * BAD_PAGE is the page that is used for page faults when linux
96 * is out-of-memory. Older versions of linux just did a
97 * do_exit(), but using this instead means there is less risk
98 * for a process dying in kernel mode, possibly leaving an inode
99 * unused etc..
100 *
101 * BAD_PAGETABLE is the accompanying page-table: it is initialized
102 * to point to BAD_PAGE entries.
103 *
104 * ZERO_PAGE is a special page that is used for zero-initialized
105 * data and COW.
106 */
107 pmd_t *
__bad_pagetable(void)108 __bad_pagetable(void)
109 {
110 memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
111 return (pmd_t *) EMPTY_PGT;
112 }
113
114 pte_t
__bad_page(void)115 __bad_page(void)
116 {
117 memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
118 return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
119 }
120
121 #ifndef CONFIG_DISCONTIGMEM
122 void
show_mem(void)123 show_mem(void)
124 {
125 long i,free = 0,total = 0,reserved = 0;
126 long shared = 0, cached = 0;
127
128 printk("\nMem-info:\n");
129 show_free_areas();
130 printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
131 i = max_mapnr;
132 while (i-- > 0) {
133 total++;
134 if (PageReserved(mem_map+i))
135 reserved++;
136 else if (PageSwapCache(mem_map+i))
137 cached++;
138 else if (!page_count(mem_map+i))
139 free++;
140 else
141 shared += atomic_read(&mem_map[i].count) - 1;
142 }
143 printk("%ld pages of RAM\n",total);
144 printk("%ld free pages\n",free);
145 printk("%ld reserved pages\n",reserved);
146 printk("%ld pages shared\n",shared);
147 printk("%ld pages swap cached\n",cached);
148 printk("%ld pages in page table cache\n",pgtable_cache_size);
149 show_buffers();
150 }
151 #endif
152
/* Record the live kernel stack pointer in @pcb and switch the CPU to
   that PCB via the PAL code.  The return value of __reload_thread()
   is the previously-active PCB (callers treat it as a physical
   address; see switch_to_system_map()). */
static inline unsigned long
load_PCB(struct thread_struct * pcb)
{
	/* $30 is the Alpha stack pointer; this asm-register binding
	   captures its current value without a function call. */
	register unsigned long sp __asm__("$30");
	pcb->ksp = sp;
	return __reload_thread(pcb);
}
160
/* Set up initial PCB, VPTB, and other such nicities.  */

static inline void
switch_to_system_map(void)
{
	unsigned long newptbr;
	unsigned long original_pcb_ptr;

	/* Initialize the kernel's page tables.  Linux puts the vptb in
	   the last slot of the L1 page table. */
	memset(swapper_pg_dir, 0, PAGE_SIZE);
	newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT;
	/* 1023 is PTRS_PER_PGD - 1: the VPTB self-map slot (cf.
	   get_pgd_slow() above). */
	pgd_val(swapper_pg_dir[1023]) =
		(newptbr << 32) | pgprot_val(PAGE_KERNEL);

	/* Set the vptb.  This is often done by the bootloader, but
	   shouldn't be required. */
	if (hwrpb->vptb != 0xfffffffe00000000) {
		wrvptptr(0xfffffffe00000000);
		hwrpb->vptb = 0xfffffffe00000000;
		hwrpb_update_checksum(hwrpb);
	}

	/* Also set up the real kernel PCB while we're at it. */
	init_task.thread.ptbr = newptbr;
	init_task.thread.pal_flags = 1;	/* set FEN, clear everything else */
	init_task.thread.flags = 0;
	/* Switch to the new page tables; load_PCB() hands back the PCB
	   that was active before (the console's). */
	original_pcb_ptr = load_PCB(&init_task.thread);
	tbia();

	/* Save off the contents of the original PCB so that we can
	   restore the original console's page tables for a clean reboot.

	   Note that the PCB is supposed to be a physical address, but
	   since KSEG values also happen to work, folks get confused.
	   Check this here. */

	if (original_pcb_ptr < PAGE_OFFSET) {
		original_pcb_ptr = (unsigned long)
			phys_to_virt(original_pcb_ptr);
	}
	original_pcb = *(struct thread_struct *) original_pcb_ptr;
}
204
/* Set once callback_init() has finished; nonzero means the console
   callbacks have been remapped into vmalloc space. */
int callback_init_done;
206
/*
 * Remap the SRM console callbacks into kernel virtual space and
 * switch to the kernel's own page tables.
 *
 * @kernel_end: current end of the kernel image; pages for console
 *              page tables are carved off here.
 * Returns the (possibly advanced) new end of the kernel image.
 */
void * __init
callback_init(void * kernel_end)
{
	struct crb_struct * crb;
	pgd_t *pgd;
	pmd_t *pmd;
	void *two_pages;

	/* Starting at the HWRPB, locate the CRB. */
	crb = (struct crb_struct *)((char *)hwrpb + hwrpb->crb_offset);

	if (alpha_using_srm) {
		/* Tell the console whither it is to be remapped. */
		if (srm_fixup(VMALLOC_START, (unsigned long)hwrpb))
			__halt();		/* "We're boned."  --Bender */

		/* Edit the procedure descriptors for DISPATCH and FIXUP.
		   Their new virtual addresses are the old offsets from
		   the first mapped region, rebased at VMALLOC_START. */
		crb->dispatch_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->dispatch_va
			 - crb->map[0].va);
		crb->fixup_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->fixup_va
			 - crb->map[0].va);
	}

	switch_to_system_map();

	/* Allocate one PGD and one PMD.  In the case of SRM, we'll need
	   these to actually remap the console.  There is an assumption
	   here that only one of each is needed, and this allows for 8MB.
	   On systems with larger consoles, additional pages will be
	   allocated as needed during the mapping process.

	   In the case of not SRM, but not CONFIG_ALPHA_LARGE_VMALLOC,
	   we need to allocate the PGD we use for vmalloc before we start
	   forking other tasks. */

	/* Round kernel_end up to a page boundary and take two zeroed
	   pages: one PMD page and one PTE page for vmalloc space. */
	two_pages = (void *)
	  (((unsigned long)kernel_end + ~PAGE_MASK) & PAGE_MASK);
	kernel_end = two_pages + 2*PAGE_SIZE;
	memset(two_pages, 0, 2*PAGE_SIZE);

	pgd = pgd_offset_k(VMALLOC_START);
	pgd_set(pgd, (pmd_t *)two_pages);
	pmd = pmd_offset(pgd, VMALLOC_START);
	pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));

	if (alpha_using_srm) {
		static struct vm_struct console_remap_vm;
		unsigned long vaddr = VMALLOC_START;
		long i, j;

		/* Set up the third level PTEs and update the virtual
		   addresses of the CRB entries. */
		for (i = 0; i < crb->map_entries; ++i) {
			unsigned long paddr = crb->map[i].pa;
			crb->map[i].va = vaddr;
			for (j = 0; j < crb->map[i].count; ++j) {
				/* Newer console's (especially on larger
				   systems) may require more pages of
				   PTEs. Grab additional pages as needed. */
				if (pmd != pmd_offset(pgd, vaddr)) {
					memset(kernel_end, 0, PAGE_SIZE);
					pmd = pmd_offset(pgd, vaddr);
					pmd_set(pmd, (pte_t *)kernel_end);
					kernel_end += PAGE_SIZE;
				}
				set_pte(pte_offset(pmd, vaddr),
					mk_pte_phys(paddr, PAGE_KERNEL));
				paddr += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			}
		}

		/* Let vmalloc know that we've allocated some space. */
		console_remap_vm.flags = VM_ALLOC;
		console_remap_vm.addr = VMALLOC_START;
		console_remap_vm.size = vaddr - VMALLOC_START;
		vmlist = &console_remap_vm;
	}

	callback_init_done = 1;
	return kernel_end;
}
291
292
293 #ifndef CONFIG_DISCONTIGMEM
294 /*
295 * paging_init() sets up the memory map.
296 */
297 void
paging_init(void)298 paging_init(void)
299 {
300 unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
301 unsigned long dma_pfn, high_pfn;
302
303 dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
304 high_pfn = max_low_pfn;
305
306 if (dma_pfn >= high_pfn)
307 zones_size[ZONE_DMA] = high_pfn;
308 else {
309 zones_size[ZONE_DMA] = dma_pfn;
310 zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
311 }
312
313 /* Initialize mem_map[]. */
314 free_area_init(zones_size);
315
316 /* Initialize the kernel's ZERO_PGE. */
317 memset((void *)ZERO_PGE, 0, PAGE_SIZE);
318 }
319 #endif /* CONFIG_DISCONTIGMEM */
320
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
/* Undo the kernel's paging setup before handing control back to the
   SRM console: put the VPTB back where SRM expects it and reload the
   console's original PCB (saved in switch_to_system_map()). */
void
srm_paging_stop (void)
{
	/* Move the vptb back to where the SRM console expects it. */
	swapper_pg_dir[1] = swapper_pg_dir[1023];
	tbia();
	wrvptptr(0x200000000);
	hwrpb->vptb = 0x200000000;
	hwrpb_update_checksum(hwrpb);

	/* Reload the page tables that the console had in use. */
	load_PCB(&original_pcb);
	tbia();
}
#endif
337
338 #ifndef CONFIG_DISCONTIGMEM
339 static void __init
printk_memory_info(void)340 printk_memory_info(void)
341 {
342 unsigned long codesize, reservedpages, datasize, initsize, tmp;
343 extern int page_is_ram(unsigned long) __init;
344 extern char _text, _etext, _data, _edata;
345 extern char __init_begin, __init_end;
346
347 /* printk all informations */
348 reservedpages = 0;
349 for (tmp = 0; tmp < max_low_pfn; tmp++)
350 /*
351 * Only count reserved RAM pages
352 */
353 if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
354 reservedpages++;
355
356 codesize = (unsigned long) &_etext - (unsigned long) &_text;
357 datasize = (unsigned long) &_edata - (unsigned long) &_data;
358 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
359
360 printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, %luk data, %luk init)\n",
361 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
362 max_mapnr << (PAGE_SHIFT-10),
363 codesize >> 10,
364 reservedpages << (PAGE_SHIFT-10),
365 datasize >> 10,
366 initsize >> 10);
367 }
368
/* Late-boot memory setup: record the page-frame limits, hand every
   unreserved bootmem page to the buddy allocator, set the high_memory
   boundary, and print the memory banner. */
void __init
mem_init(void)
{
	max_mapnr = num_physpages = max_low_pfn;
	/* free_all_bootmem() returns the number of pages released. */
	totalram_pages += free_all_bootmem();
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	printk_memory_info();
}
#endif /* CONFIG_DISCONTIGMEM */
379
380 void
free_reserved_mem(void * start,void * end)381 free_reserved_mem(void *start, void *end)
382 {
383 void *__start = start;
384 for (; __start < end; __start += PAGE_SIZE) {
385 ClearPageReserved(virt_to_page(__start));
386 set_page_count(virt_to_page(__start), 1);
387 free_page((long)__start);
388 totalram_pages++;
389 }
390 }
391
392 void
free_initmem(void)393 free_initmem(void)
394 {
395 extern char __init_begin, __init_end;
396
397 free_reserved_mem(&__init_begin, &__init_end);
398 printk (KERN_INFO "Freeing unused kernel memory: %ldk freed\n",
399 (&__init_end - &__init_begin) >> 10);
400 }
401
#ifdef CONFIG_BLK_DEV_INITRD
/* Release the pages that held the initial ramdisk image, once its
   contents are no longer needed. */
void
free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_mem((void *)start, (void *)end);
	/* end - start is unsigned long, so %lu (was %ld) is the
	   matching format specifier. */
	printk(KERN_INFO "Freeing initrd memory: %luk freed\n",
	       (end - start) >> 10);
}
#endif
411
412 void
si_meminfo(struct sysinfo * val)413 si_meminfo(struct sysinfo *val)
414 {
415 val->totalram = totalram_pages;
416 val->sharedram = 0;
417 val->freeram = nr_free_pages();
418 val->bufferram = atomic_read(&buffermem_pages);
419 val->totalhigh = 0;
420 val->freehigh = 0;
421 val->mem_unit = PAGE_SIZE;
422 }
423