#ifdef __KERNEL__
#ifndef _PPC_PGALLOC_H
#define _PPC_PGALLOC_H

#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>

#ifdef CONFIG_PTE_64BIT
/* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
#define PGDIR_ORDER	1
#else
#define PGDIR_ORDER	0
#endif

/*
 * This is handled very differently on the PPC since our page tables
 * are all 0's and I want to be able to use these zero'd pages elsewhere
 * as well - it gives us quite a speedup.
 *
 * Note that the SMP/UP versions are the same, but we don't need a
 * per-cpu list of zero pages because we do the zero-ing with the cache
 * off and the access routines are lock-free.  The pgt cache stuff is
 * per-cpu, since it isn't done with any lock-free access routines
 * (although I think we need arch-specific routines so I can do lock-free).
 *
 * I need to generalize this so we can use it for other arch's as well.
 * -- Cort
 */
#ifdef CONFIG_SMP
#define quicklists	cpu_data[smp_processor_id()]
#else
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache;
	unsigned long pgtable_cache_sz;
} quicklists;
#endif

#define pgd_quicklist		(quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(quicklists.pte_cache)
#define pgtable_cache_size	(quicklists.pgtable_cache_sz)
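
/*
 * The quicklists are singly-linked freelists of cached page-table
 * pages: the first word of each free page holds the address of the
 * next free page (see free_pgd_fast() and pte_free_fast() below).
 */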

extern unsigned long *zero_cache;	/* head of the linked list of pre-zero'd pages */
extern atomic_t zero_sz;		/* # currently pre-zero'd pages */
extern atomic_t zeropage_hits;		/* # requests for zero'd pages we've satisfied */
extern atomic_t zeropage_calls;		/* # requests for zero'd pages that have been made */
extern atomic_t zerototal;		/* # pages zero'd over time */

#define zero_quicklist		(zero_cache)
#define zero_cache_sz		(zero_sz)
#define zero_cache_calls	(zeropage_calls)
#define zero_cache_hits		(zeropage_hits)
#define zero_cache_total	(zerototal)

/* return a pre-zero'd page from the list, return NULL if none available -- Cort */
extern unsigned long get_zero_page_fast(void);

extern void __bad_pte(pmd_t *pmd);

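/*
 * Slow path: get a fresh, zeroed pgdir from the page allocator.
 * The pgdir covers 1 << PGDIR_ORDER pages.
 */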
extern __inline__ pgd_t *get_pgd_slow(void)
{
	pgd_t *ret;

	if ((ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGDIR_ORDER)) != NULL)
		clear_pages(ret, PGDIR_ORDER);
	return ret;
}

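/*
 * Fast path: pop a cached pgdir off the quicklist and clear the word
 * that held the freelist link; fall back to get_pgd_slow() if the
 * list is empty.
 */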
extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}

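/* Push a pgdir onto the quicklist, chaining it in through its first word. */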
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long **)pgd = pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

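/* Hand the pgdir pages straight back to the page allocator. */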
extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGDIR_ORDER);
}

#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc(mm)		get_pgd_fast()

/*
 * We don't have any real pmd's, and this code never triggers because
 * the pgd will always be present.
 */
#define pmd_alloc_one_fast(mm, address)	({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, address)	({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()

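/*
 * Allocate a zeroed pte page.  Early in boot, before mem_init() has
 * run, the page allocator is not available, so take the page from the
 * boot-time allocator via early_get_page() instead.
 */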
static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;
	extern int mem_init_done;
	extern void *early_get_page(void);

	if (mem_init_done)
		pte = (pte_t *) __get_free_page(GFP_KERNEL);
	else
		pte = (pte_t *) early_get_page();
	if (pte != NULL)
		clear_page(pte);
	return pte;
}

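/*
 * Fast path: pop a pte page off the quicklist; returns NULL if the
 * list is empty, in which case the caller uses pte_alloc_one().
 */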
static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long *ret;

	if ((ret = pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

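/* Push a pte page onto the quicklist, chaining it in through its first word. */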
extern __inline__ void pte_free_fast(pte_t *pte)
{
	*(unsigned long **)pte = pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

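/* Give a pte page back to the page allocator. */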
extern __inline__ void pte_free_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

#define pte_free(pte)	pte_free_slow(pte)

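/*
 * On PPC the pmd entry simply holds the kernel virtual address of the
 * pte page; there are no extra flag bits to set.
 */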
#define pmd_populate(mm, pmd, pte)	(pmd_val(*(pmd)) = (unsigned long)(pte))

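/* Trim the quicklists when they grow too large; implemented in the mm code. */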
extern int do_check_pgt_cache(int, int);

#endif /* _PPC_PGALLOC_H */
#endif /* __KERNEL__ */