/*
 *  linux/arch/arm/mm/mm-armo.c
 *
 *  Copyright (C) 1998-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for older ARM processor architectures.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/arch/memory.h>

#include <asm/mach/map.h>

#define MEMC_TABLE_SIZE (256*sizeof(unsigned long))

kmem_cache_t *pte_cache, *pgd_cache;	/* caches for pte tables and MEMC table + pgd blocks */
int page_nr;				/* number of page frames, set from max_low_pfn in memtable_init() */

/*
 * Allocate a page table.  Note that we place the MEMC
 * table before the page directory.  This means we can
 * easily get to both tightly-associated data structures
 * with a single pointer.
 */
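/*
 * A sketch of one pgd_cache object, as implied by alloc_pgd_table()
 * and free_pgd_slow() below:
 *
 *	base of allocation ->	+---------------------------------+
 *				| MEMC table (MEMC_TABLE_SIZE)    |
 *	returned pgd_t *   ->	+---------------------------------+
 *				| page directory (PTRS_PER_PGD)   |
 *				+---------------------------------+
 */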
static inline pgd_t *alloc_pgd_table(int priority)
{
	void *pg2k = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

	if (pg2k)
		pg2k += MEMC_TABLE_SIZE;

	return (pgd_t *)pg2k;
}

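/*
 * Free a page directory allocated by get_pgd_slow().  Step back over the
 * MEMC table to recover the address originally returned by the slab
 * allocator.
 */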
void free_pgd_slow(pgd_t *pgd)
{
	unsigned long tbl = (unsigned long)pgd;

	/*
	 * CHECKME: are we leaking pte tables here???
	 */

	tbl -= MEMC_TABLE_SIZE;

	kmem_cache_free(pgd_cache, (void *)tbl);
}

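/*
 * Allocate and set up a new page directory for "mm".  The first pmd/pte
 * is always populated (the vector page must be mapped), the kernel
 * entries are copied from init_mm, and the MEMC tables are then updated
 * from the new page directory.
 */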
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = alloc_pgd_table(GFP_KERNEL);
	if (!new_pgd)
		goto no_pgd;

	/*
	 * This lock is here just to satisfy pmd_alloc and pte_lock
	 */
	spin_lock(&mm->page_table_lock);

	/*
	 * On ARM, first page must always be allocated since it contains
	 * the machine vectors.
	 */
	new_pmd = pmd_alloc(mm, new_pgd, 0);
	if (!new_pmd)
		goto no_pmd;

	new_pte = pte_alloc(mm, new_pmd, 0);
	if (!new_pte)
		goto no_pte;

	init_pgd = pgd_offset_k(0);
	init_pmd = pmd_offset(init_pgd, 0);
	init_pte = pte_offset(init_pmd, 0);

	set_pte(new_pte, *init_pte);

	/*
	 * Most of the page table entries are zeroed when the table is
	 * created, so we only need to copy over the kernel portion of
	 * the page directory from init_mm.
	 */
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
		(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	spin_unlock(&mm->page_table_lock);

	/* update MEMC tables */
	cpu_memc_update_all(new_pgd);
	return new_pgd;

no_pte:
	spin_unlock(&mm->page_table_lock);
	pmd_free(new_pmd);
	check_pgt_cache();
	free_pgd_slow(new_pgd);
	return NULL;

no_pmd:
	spin_unlock(&mm->page_table_lock);
	free_pgd_slow(new_pgd);
	return NULL;

no_pgd:
	return NULL;
}

/*
 * No special code is required here.
 */
void setup_mm_for_reboot(char mode)
{
}

/*
 * This contains the code to set up the memory map on an ARM2/ARM250/ARM3
 * machine. This is both processor & architecture specific, and requires
 * some more work to get it to fit into our separate processor and
 * architecture structure.
 */
void __init memtable_init(struct meminfo *mi)
{
	pte_t *pte;
	int i;

	page_nr = max_low_pfn;

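	/*
	 * Allocate the initial kernel pte table and map the first virtual
	 * page read-only; this is the page that carries the machine
	 * vectors (see the comment in get_pgd_slow()).  All other entries
	 * in swapper_pg_dir are cleared.
	 */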
	pte = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
	pte[0] = mk_pte_phys(PAGE_OFFSET + 491520, PAGE_READONLY);
	pmd_populate(&init_mm, pmd_offset(swapper_pg_dir, 0), pte);

	for (i = 1; i < PTRS_PER_PGD; i++)
		pgd_val(swapper_pg_dir[i]) = 0;
}

void __init iotable_init(struct map_desc *io_desc)
{
	/* nothing to do */
}

/*
 * We never have holes in the memmap
 */
void __init create_memmap_holes(struct meminfo *mi)
{
}

static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
{
	memzero(pte, sizeof(pte_t) * PTRS_PER_PTE);
}

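/*
 * Only the user part of the new page directory is zeroed here; the
 * kernel entries are copied over from init_mm by get_pgd_slow().
 */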
static void pgd_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
{
	pgd_t *pgd = (pte + MEMC_TABLE_SIZE);

	memzero(pgd, USER_PTRS_PER_PGD * sizeof(pgd_t));
}

void __init pgtable_cache_init(void)
{
	pte_cache = kmem_cache_create("pte-cache",
				sizeof(pte_t) * PTRS_PER_PTE,
				0, 0, pte_cache_ctor, NULL);
	if (!pte_cache)
		BUG();

	pgd_cache = kmem_cache_create("pgd-cache", MEMC_TABLE_SIZE +
				sizeof(pgd_t) * PTRS_PER_PGD,
				0, 0, pgd_cache_ctor, NULL);
	if (!pgd_cache)
		BUG();
}