/*
 * Set up paging and the MMU.
 *
 * Copyright (C) 2000-2003, Axis Communications AB.
 *
 * Authors: Bjorn Wesen <bjornw@axis.com>
 * Tobias Anderberg <tobiasa@axis.com>, CRISv32 port.
 */
9 #include <linux/mmzone.h>
10 #include <linux/init.h>
11 #include <linux/bootmem.h>
12 #include <linux/mm.h>
13 #include <asm/pgtable.h>
14 #include <asm/page.h>
15 #include <asm/types.h>
16 #include <asm/mmu.h>
17 #include <asm/io.h>
18 #include <asm/mmu_context.h>
19 #include <arch/hwregs/asm/mmu_defs_asm.h>
20 #include <arch/hwregs/supp_reg.h>
21
22 extern void tlb_init(void);
23
24 /*
25 * The kernel is already mapped with linear mapping at kseg_c so there's no
26 * need to map it with a page table. However, head.S also temporarily mapped it
27 * at kseg_4 thus the ksegs are set up again. Also clear the TLB and do various
28 * other paging stuff.
29 */
void __init cris_mmu_init(void)
{
	unsigned long mmu_config;
	unsigned long mmu_kbase_hi;
	unsigned long mmu_kbase_lo;
	unsigned short mmu_page_id;

	/*
	 * Make sure the current pgd table points to something sane, even if it
	 * is most probably not used until the next switch_mm.
	 */
	per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd;

#ifdef CONFIG_SMP
	{
		/*
		 * Tell the MMU hardware where this CPU's page directory
		 * lives, via the RW_MM_TLB_PGD support register in banks 1
		 * and 2 (presumably the instruction and data MMU banks,
		 * matching the BANK_IM/BANK_DM writes below — confirm
		 * against supp_reg.h).
		 */
		pgd_t **pgd;
		pgd = (pgd_t**)&per_cpu(current_pgd, smp_processor_id());
		SUPP_BANK_SEL(1);
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
		SUPP_BANK_SEL(2);
		SUPP_REG_WR(RW_MM_TLB_PGD, pgd);
	}
#endif

	/* Initialise the TLB. Function found in tlb.c. */
	tlb_init();

	/*
	 * Enable exceptions and initialize the kernel segments.
	 * See head.S for differences between ARTPEC-3 and ETRAX FS.
	 *
	 * The we/acc/ex/inv fields look like per-fault-type enables
	 * (write, access, execute, invalid); each seg_* field selects
	 * whether that 256MB kernel segment is linearly mapped or goes
	 * through the page tables.
	 */
	mmu_config = ( REG_STATE(mmu, rw_mm_cfg, we, on) |
		REG_STATE(mmu, rw_mm_cfg, acc, on) |
		REG_STATE(mmu, rw_mm_cfg, ex, on) |
		REG_STATE(mmu, rw_mm_cfg, inv, on) |
#ifdef CONFIG_CRIS_MACH_ARTPEC3
		REG_STATE(mmu, rw_mm_cfg, seg_f, page) |
		REG_STATE(mmu, rw_mm_cfg, seg_e, page) |
		REG_STATE(mmu, rw_mm_cfg, seg_d, linear) |
#else
		REG_STATE(mmu, rw_mm_cfg, seg_f, linear) |
		REG_STATE(mmu, rw_mm_cfg, seg_e, linear) |
		REG_STATE(mmu, rw_mm_cfg, seg_d, page) |
#endif
		REG_STATE(mmu, rw_mm_cfg, seg_c, linear) |	/* kernel at kseg_c */
		REG_STATE(mmu, rw_mm_cfg, seg_b, linear) |
#ifndef CONFIG_ETRAX_VCS_SIM
		REG_STATE(mmu, rw_mm_cfg, seg_a, page) |
#else
		REG_STATE(mmu, rw_mm_cfg, seg_a, linear) |
#endif
		REG_STATE(mmu, rw_mm_cfg, seg_9, page) |
		REG_STATE(mmu, rw_mm_cfg, seg_8, page) |
		REG_STATE(mmu, rw_mm_cfg, seg_7, page) |
		REG_STATE(mmu, rw_mm_cfg, seg_6, page) |
		REG_STATE(mmu, rw_mm_cfg, seg_5, page) |
		REG_STATE(mmu, rw_mm_cfg, seg_4, page) |
		REG_STATE(mmu, rw_mm_cfg, seg_3, page) |
		REG_STATE(mmu, rw_mm_cfg, seg_2, page) |
		REG_STATE(mmu, rw_mm_cfg, seg_1, page) |
		REG_STATE(mmu, rw_mm_cfg, seg_0, page));

	/*
	 * Physical base addresses for the linearly-mapped segments
	 * configured above. See head.S for differences between ARTPEC-3
	 * and ETRAX FS.
	 */
	mmu_kbase_hi = ( REG_FIELD(mmu, rw_mm_kbase_hi, base_f, 0x0) |
#ifdef CONFIG_CRIS_MACH_ARTPEC3
		REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x0) |
		REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x5) |
#else
		REG_FIELD(mmu, rw_mm_kbase_hi, base_e, 0x8) |
		REG_FIELD(mmu, rw_mm_kbase_hi, base_d, 0x0) |
#endif
		REG_FIELD(mmu, rw_mm_kbase_hi, base_c, 0x4) |
		REG_FIELD(mmu, rw_mm_kbase_hi, base_b, 0xb) |
#ifndef CONFIG_ETRAX_VCS_SIM
		REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0x0) |
#else
		REG_FIELD(mmu, rw_mm_kbase_hi, base_a, 0xa) |
#endif
		REG_FIELD(mmu, rw_mm_kbase_hi, base_9, 0x0) |
		REG_FIELD(mmu, rw_mm_kbase_hi, base_8, 0x0));

	/* Segments 0-7 are page-mapped, so their bases stay zero. */
	mmu_kbase_lo = ( REG_FIELD(mmu, rw_mm_kbase_lo, base_7, 0x0) |
		REG_FIELD(mmu, rw_mm_kbase_lo, base_6, 0x0) |
		REG_FIELD(mmu, rw_mm_kbase_lo, base_5, 0x0) |
		REG_FIELD(mmu, rw_mm_kbase_lo, base_4, 0x0) |
		REG_FIELD(mmu, rw_mm_kbase_lo, base_3, 0x0) |
		REG_FIELD(mmu, rw_mm_kbase_lo, base_2, 0x0) |
		REG_FIELD(mmu, rw_mm_kbase_lo, base_1, 0x0) |
		REG_FIELD(mmu, rw_mm_kbase_lo, base_0, 0x0));

	/* Start out with process/context id 0 in the TLB-hi register. */
	mmu_page_id = REG_FIELD(mmu, rw_mm_tlb_hi, pid, 0);

	/* Update the instruction MMU. */
	SUPP_BANK_SEL(BANK_IM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

	/* Update the data MMU. */
	SUPP_BANK_SEL(BANK_DM);
	SUPP_REG_WR(RW_MM_CFG, mmu_config);
	SUPP_REG_WR(RW_MM_KBASE_HI, mmu_kbase_hi);
	SUPP_REG_WR(RW_MM_KBASE_LO, mmu_kbase_lo);
	SUPP_REG_WR(RW_MM_TLB_HI, mmu_page_id);

	/* Current process id 0 as well (the kernel's context). */
	SPEC_REG_WR(SPEC_REG_PID, 0);

	/*
	 * The MMU has been enabled ever since head.S but just to make it
	 * totally obvious enable it here as well.
	 */
	SUPP_BANK_SEL(BANK_GC);
	SUPP_REG_WR(RW_GC_CFG, 0xf); /* IMMU, DMMU, ICache, DCache on */
}
145
paging_init(void)146 void __init paging_init(void)
147 {
148 int i;
149 unsigned long zones_size[MAX_NR_ZONES];
150
151 printk("Setting up paging and the MMU.\n");
152
153 /* Clear out the init_mm.pgd that will contain the kernel's mappings. */
154 for(i = 0; i < PTRS_PER_PGD; i++)
155 swapper_pg_dir[i] = __pgd(0);
156
157 cris_mmu_init();
158
159 /*
160 * Initialize the bad page table and bad page to point to a couple of
161 * allocated pages.
162 */
163 empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
164 memset((void *) empty_zero_page, 0, PAGE_SIZE);
165
166 /* All pages are DMA'able in Etrax, so put all in the DMA'able zone. */
167 zones_size[0] = ((unsigned long) high_memory - PAGE_OFFSET) >> PAGE_SHIFT;
168
169 for (i = 1; i < MAX_NR_ZONES; i++)
170 zones_size[i] = 0;
171
172 /*
173 * Use free_area_init_node instead of free_area_init, because it is
174 * designed for systems where the DRAM starts at an address
175 * substantially higher than 0, like us (we start at PAGE_OFFSET). This
176 * saves space in the mem_map page array.
177 */
178 free_area_init_node(0, zones_size, PAGE_OFFSET >> PAGE_SHIFT, 0);
179
180 mem_map = contig_page_data.node_mem_map;
181 }
182