#ifndef __ASM_SH_PGALLOC_H
#define __ASM_SH_PGALLOC_H

#include <asm/processor.h>
#include <linux/threads.h>
#include <linux/slab.h>

#define pgd_quicklist ((unsigned long *)0)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist ((unsigned long *)0)
#define pgtable_cache_size 0L

#define pmd_populate(mm, pmd, pte) \
		set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
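
/*
 * Illustrative sketch (not part of the original header): a fault path
 * that has just allocated a pte page hooks it into the pmd like this,
 * after which *pmd holds __pa(new) plus the _PAGE_TABLE bits:
 *
 *	pte_t *new = pte_alloc_one(mm, address);
 *	if (new)
 *		pmd_populate(mm, pmd, new);
 */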

/*
 * Allocate and free page tables.
 */

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
	pgd_t *pgd = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL);

	if (pgd)
		memset(pgd, 0, pgd_size);

	return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
	kfree(pgd);
}
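
/*
 * Illustrative sketch (not part of the original header): address-space
 * creation and teardown pair these calls, e.g. in the generic mm setup
 * and final mmdrop() paths:
 *
 *	mm->pgd = pgd_alloc(mm);	(new, zeroed user page directory)
 *	...
 *	pgd_free(mm->pgd);		(once the mm is finally dropped)
 */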

static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL);
	if (pte)
		clear_page(pte);
	return pte;
}

/*
 * No pte quicklists on sh (see the null stubs above), so the fast
 * allocation path never succeeds and callers fall back to
 * pte_alloc_one().
 */
static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
	return NULL;
}

static inline void pte_free_slow(pte_t *pte)
{
	free_page((unsigned long)pte);
}

#define pte_free(pte)		pte_free_slow(pte)

/*
 * Allocating and freeing a pmd is trivial: the one-entry pmd lives
 * inside the pgd, so it has no extra memory associated with it.  With
 * only two levels of page tables the pmd allocators below must never
 * be reached, and freeing a pmd is a no-op.
 */

#define pmd_alloc_one_fast(mm, addr)	({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free_slow(x)		do { } while (0)
#define pmd_free_fast(x)		do { } while (0)
#define pmd_free(x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()

/* Nothing to trim: there are no page-table quicklists on sh. */
static inline int do_check_pgt_cache(int low, int high) { return 0; }

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(mm, start, end) flushes a range of pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */

extern void flush_tlb(void);
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_range(struct mm_struct *mm, unsigned long start,
			    unsigned long end);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void __flush_tlb_page(unsigned long asid, unsigned long page);
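
/*
 * Illustrative sketch (not part of the original header): page-table
 * updates are paired with the narrowest matching flush, e.g. after
 * tearing down the mappings of an munmap()ed region:
 *
 *	(clear the ptes covering [start, end) first)
 *	flush_tlb_range(mm, start, end);
 *
 * while a single-pte update, such as a write-protect fault fix-up,
 * only needs:
 *
 *	flush_tlb_page(vma, address);
 */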

static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	/* Nothing to do */
}

#if defined(__SH4__)
/*
 * For SH-4 we have our own implementation of ptep_get_and_clear():
 * besides clearing the pte it also clears the page's PG_mapped flag
 * when the page is anonymous or its file has no shared mappings, so
 * the SH-4 cache-alias handling can treat the page as no longer
 * mapped into user space.
 */
static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
	pte_t pte = *ptep;

	pte_clear(ptep);
	if (!pte_not_present(pte)) {
		struct page *page = pte_page(pte);
		if (VALID_PAGE(page) &&
		    (!page->mapping || !(page->mapping->i_mmap_shared)))
			__clear_bit(PG_mapped, &page->flags);
	}
	return pte;
}
#else
static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
	pte_t pte = *ptep;
	pte_clear(ptep);
	return pte;
}
#endif
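
/*
 * Illustrative sketch (not part of the original header): the unmap
 * path reads and clears a pte in one step so the old value can still
 * be examined afterwards, e.g. to propagate the hardware dirty bit
 * (the helper name below is hypothetical):
 *
 *	pte_t old = ptep_get_and_clear(ptep);
 *	if (pte_dirty(old))
 *		mark_page_dirty(pte_page(old));
 */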

/*
 * The following functions are the same as the generic ones.
 */
static inline int ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte(ptep, pte_mkold(pte));
	return 1;
}

static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
	pte_t pte = *ptep;
	if (!pte_dirty(pte))
		return 0;
	set_pte(ptep, pte_mkclean(pte));
	return 1;
}
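
/*
 * Illustrative sketch (not part of the original header): page aging in
 * the swap-out path can sample and reset the hardware-maintained
 * accessed bit with the primitive above (the aging helpers here are
 * hypothetical):
 *
 *	if (ptep_test_and_clear_young(ptep))
 *		age_page_up(page);
 *	else
 *		age_page_down(page);
 *
 * ptep_test_and_clear_dirty() is used the same way when deciding
 * whether a page needs writeback before it can be reclaimed.
 */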

static inline void ptep_set_wrprotect(pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte(ptep, pte_wrprotect(old_pte));
}
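
/*
 * Illustrative sketch (not part of the original header): fork()'s page
 * table copy write-protects ptes shared by parent and child so that
 * the first write faults and triggers copy-on-write:
 *
 *	ptep_set_wrprotect(src_pte);
 *	set_pte(dst_pte, *src_pte);
 */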

static inline void ptep_mkdirty(pte_t *ptep)
{
	pte_t old_pte = *ptep;
	set_pte(ptep, pte_mkdirty(old_pte));
}
#endif /* __ASM_SH_PGALLOC_H */