/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We cannot
	 * change these attributes once the initial assembly has set up the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);
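
/*
 * Example (illustrative): booting with "cachepolicy=writethrough" on the
 * kernel command line selects CPOLICY_WRITETHROUGH above; on ARMv6 and
 * later the policy is forced back to writeback, as the warning above notes.
 */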

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);
#endif

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	  /* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
#ifndef CONFIG_ARM_LPAE
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
#endif
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_BUFFERABLE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_SO] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_UNCACHED,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);
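
/*
 * Typical use (illustrative): mapping code such as ioremap looks up the
 * attributes for a given mapping type, e.g. get_mem_type(MT_DEVICE), and
 * treats a NULL return as an invalid type.
 */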

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Only use write-through for non-SMP systems
	 */
	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3()) {
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
	}
	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif

		if (is_smp()) {
			/*
			 * Mark memory with the "shared" attribute
			 * for SMP systems
			 */
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

#ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
	 */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pte |= PTE_EXT_AF;
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_SECT_AF;
	}
	kern_pgprot |= PTE_EXT_AF;
	vecs_pgprot |= PTE_EXT_AF;
#endif

	for (i = 0; i < 16; i++) {
		pteval_t v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif
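
/*
 * Note (illustrative): this hook is consulted when physical memory is
 * mmap()ed, e.g. via /dev/mem; non-RAM pfns are downgraded to non-cacheable
 * and O_SYNC mappings to write-combining, as implemented above.
 */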

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
	void *ptr = __va(memblock_alloc(sz, align));
	memset(ptr, 0, sz);
	return ptr;
}

static void __init *early_alloc(unsigned long sz)
{
	return early_alloc_aligned(sz, sz);
}
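
/*
 * Note: early_alloc(sz) hands back sz bytes of zeroed memory aligned to sz
 * itself; early_pte_alloc() below relies on this natural alignment when it
 * allocates a combined Linux/hardware L2 table in a single call.
 */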

static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
		__pmd_populate(pmd, __pa(pte), prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pud_t *pud, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

#ifndef CONFIG_ARM_LPAE
		if (addr & SECTION_SIZE)
			pmd++;
#endif

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}

static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
	unsigned long end, unsigned long phys, const struct mem_type *type)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_section(pud, addr, next, phys, type);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}
	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
		       " at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
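	/*
	 * Worked example (illustrative): pfn 0x200000 is physical address
	 * 0x2_0000_0000, so (pfn >> 20) & 0xF is 0x2, which lands in bits
	 * [23:20] of each supersection entry written below.
	 */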
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
#endif	/* !CONFIG_ARM_LPAE */

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx out of vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;

	if (!nr)
		return;

	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md);
		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(md->type);
		vm->caller = iotable_init;
		vm_area_add_early(vm++);
	}
}
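
/*
 * Typical use (illustrative): a machine's ->map_io() callback hands this a
 * static table, for example:
 *
 *	static struct map_desc foo_io_desc[] __initdata = {
 *		{
 *			.virtual	= FOO_IO_VIRT_BASE,
 *			.pfn		= __phys_to_pfn(FOO_IO_PHYS_BASE),
 *			.length		= SZ_1M,
 *			.type		= MT_DEVICE,
 *		},
 *	};
 *
 *	iotable_init(foo_io_desc, ARRAY_SIZE(foo_io_desc));
 *
 * FOO_IO_VIRT_BASE and FOO_IO_PHYS_BASE are hypothetical platform constants.
 */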

#ifndef CONFIG_ARM_LPAE

/*
 * The Linux PMD is made of two consecutive section entries covering 2MB
 * (see definition in include/asm/pgtable-2level.h).  However a call to
 * create_mapping() may optimize static mappings by using individual
 * 1MB section mappings.  This leaves the actual PMD potentially half
 * initialized if the top or bottom section entry isn't used, leaving it
 * open to problems if a subsequent ioremap() or vmalloc() tries to use
 * the virtual space left free by that unused section entry.
 *
 * Let's avoid the issue by inserting dummy vm entries covering the unused
 * PMD halves once the static mappings are in place.
 */
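
/*
 * Example (illustrative): a static mapping that starts at 0xfef00000 begins
 * on an odd 1MB section boundary within the 2MB PMD covering 0xfee00000 -
 * 0xff000000; if the first section entry of that PMD is unused,
 * fill_pmd_gaps() below blocks 0xfee00000 - 0xfef00000 with a dummy vm entry.
 */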

static void __init pmd_empty_section_gap(unsigned long addr)
{
	struct vm_struct *vm;

	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
	vm->addr = (void *)addr;
	vm->size = SECTION_SIZE;
	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
	vm->caller = pmd_empty_section_gap;
	vm_area_add_early(vm);
}

static void __init fill_pmd_gaps(void)
{
	struct vm_struct *vm;
	unsigned long addr, next = 0;
	pmd_t *pmd;

	/* we're still single threaded hence no lock needed here */
	for (vm = vmlist; vm; vm = vm->next) {
		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
			continue;
		addr = (unsigned long)vm->addr;
		if (addr < next)
			continue;

		/*
		 * Check if this vm starts on an odd section boundary.
		 * If so and the first section entry for this PMD is free
		 * then we block the corresponding virtual address.
		 */
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr);
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr & PMD_MASK);
		}

		/*
		 * Then check if this vm ends on an odd section boundary.
		 * If so and the second section entry for this PMD is empty
		 * then we block the corresponding virtual address.
		 */
		addr += vm->size;
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr) + 1;
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr);
		}

		/* no need to look at any vm entry until we hit the next PMD */
		next = (addr + PMD_SIZE - 1) & PMD_MASK;
	}
}

#else
#define fill_pmd_gaps() do { } while (0)
#endif

static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 240m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);
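
/*
 * Example (illustrative): "vmalloc=256M" on the command line reserves 256MB
 * for the vmalloc area (clamped between 16MB and
 * VMALLOC_END - (PAGE_OFFSET + 32MB) by the checks above), moving vmalloc_min
 * down and shrinking the lowmem limit accordingly.
 */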

static phys_addr_t lowmem_limit __initdata = 0;

void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

		if (bank->start > ULONG_MAX)
			highmem = 1;

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area greatly simplifying things later.
		 */
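		/*
		 * Example (illustrative): with PAGE_OFFSET/PHYS_OFFSET at
		 * 0xc0000000/0x80000000 and vmalloc_min at 0xef800000, a
		 * single 1GB bank at 0x80000000 is split here into a 760MB
		 * lowmem bank and a 264MB highmem bank.
		 */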
		if (!highmem && __va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
		 */
		if (highmem) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(!CONFIG_HIGHMEM).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > vmalloc_min ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
			       "to -%.8llx (vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1,
			       (unsigned long long)bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		if (!bank->highmem && bank->start + bank->size > lowmem_limit)
			lowmem_limit = bank->start + bank->size;

		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
	high_memory = __va(lowmem_limit - 1) + 1;
	memblock_set_current_limit(lowmem_limit);
}

static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= lowmem_limit)
		end = lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}

#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#endif
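
/*
 * Note: assuming the classic 2-level layout (PTRS_PER_PGD == 2048 with an
 * 8-byte pgd_t), this works out to the familiar 16KB swapper_pg_dir; under
 * LPAE an extra page is reserved for the top-level pgd, as noted above.
 */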

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE);

	early_trap_init(vectors);

	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();
	fill_pmd_gaps();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}

static void __init map_lowmem(void)
{
	struct memblock_region *reg;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;
		struct map_desc map;

		if (end > lowmem_limit)
			end = lowmem_limit;
		if (start >= end)
			break;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY;

		create_mapping(&map);
	}
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	memblock_set_current_limit(lowmem_limit);

	build_mem_type_table();
	prepare_page_table();
	map_lowmem();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}