#ifndef _CRIS_PGALLOC_H
#define _CRIS_PGALLOC_H

#include <asm/page.h>
#include <linux/threads.h>

extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache;
	unsigned long pgtable_cache_sz;
} quicklists;

#define pgd_quicklist		(quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(quicklists.pte_cache)
#define pgtable_cache_size	(quicklists.pgtable_cache_sz)
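
/*
 * How the quicklists work (illustrative sketch only; push/pop are
 * hypothetical helper names, not part of this header): a freed pgd or
 * pte page is chained onto its list by storing the old list head in the
 * first word of the page, so the free pages themselves form a singly
 * linked list:
 *
 *	void push(unsigned long *page)
 *	{
 *		*page = (unsigned long) pte_quicklist;	// link to old head
 *		pte_quicklist = page;			// page becomes new head
 *		pgtable_cache_size++;
 *	}
 *
 *	unsigned long *pop(void)
 *	{
 *		unsigned long *page = pte_quicklist;
 *		if (page) {
 *			pte_quicklist = (unsigned long *) *page;	// unlink head
 *			pgtable_cache_size--;
 *		}
 *		return page;
 *	}
 *
 * get_pgd_fast()/free_pgd_fast() and pte_alloc_one_fast()/pte_free_fast()
 * below are exactly this pop/push pair.
 */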

#define pmd_populate(mm, pmd, pte)	pmd_set(pmd, pte)

/*
 * Allocate and free page tables.
 */

/* Slow path: allocate a fresh page for a pgd, clear the user entries and
 * copy the kernel mappings from swapper_pg_dir. */
extern __inline__ pgd_t *get_pgd_slow(void)
{
	pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);

	if (ret) {
		memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return ret;
}

/* Slow path: give the pgd page back to the page allocator. */
extern __inline__ void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

/* Fast path: pop a pgd page off the quicklist, falling back to the slow
 * path when the list is empty. */
extern __inline__ pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	} else
		ret = (unsigned long *)get_pgd_slow();
	return (pgd_t *)ret;
}

/* Fast path: push the pgd page back onto the quicklist for reuse. */
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
}

/* Allocate and zero a fresh page of pte's. */
extern inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte;

	pte = (pte_t *) __get_free_page(GFP_KERNEL);
	if (pte)
		clear_page(pte);
	return pte;
}

/* Pop a pte page off the quicklist; returns NULL when the list is empty
 * so the caller can fall back to pte_alloc_one(). */
extern inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	unsigned long *ret;

	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = ret[1];
		pgtable_cache_size--;
	}
	return (pte_t *)ret;
}

/* Push the pte page back onto the quicklist for reuse. */
extern __inline__ void pte_free_fast(pte_t *pte)
{
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
}

/* Give the pte page back to the page allocator. */
extern __inline__ void pte_free_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

#define pte_free(pte)		pte_free_slow(pte)
#define pgd_free(pgd)		free_pgd_slow(pgd)
#define pgd_alloc(mm)		get_pgd_fast()
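
/*
 * Minimal sketch of the intended calling pattern from generic mm code
 * (assumes a valid struct mm_struct *mm and a user address addr; locking
 * and error handling are the caller's business):
 *
 *	pgd_t *pgd = pgd_alloc(mm);		// quicklist first, slow path as fallback
 *	pte_t *pte = pte_alloc_one_fast(mm, addr);	// NULL when the quicklist is empty
 *	if (!pte)
 *		pte = pte_alloc_one(mm, addr);	// fresh zeroed page
 *	...
 *	pte_free(pte);				// back to the page allocator
 *	pgd_free(pgd);
 */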

/*
 * We don't have any real pmd's, and this code never triggers because
 * the pgd will always be present.
 */

#define pmd_alloc_one_fast(mm, addr)	({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free_slow(x)		do { } while (0)
#define pmd_free_fast(x)		do { } while (0)
#define pmd_free(x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()

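/*
 * With the pmd level folded into the pgd, the "pmd" handed to
 * pmd_populate() is really the pgd entry itself, so a two-level walk
 * looks roughly like this (illustrative only):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);	// effectively (pmd_t *)pgd here
 *	pmd_populate(mm, pmd, pte);		// point the entry at the pte page
 */
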
/* other stuff */

extern int do_check_pgt_cache(int, int);

#endif /* _CRIS_PGALLOC_H */