#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>

extern struct kmem_cache *hugepte_cache;

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return hpd.pd & HUGEPD_SHIFT_MASK;
}
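
/*
 * Worked example of the encoding the two accessors above decode (a
 * sketch, assuming the 64-bit layout where PD_HUGE is the top address
 * bit and HUGEPD_SHIFT_MASK is 0x3f): for a hugepte table at
 * 0xc000000001230000 backing 16M pages (shift 24 == 0x18),
 *
 *	hpd.pd            == 0x4000000001230018
 *	hugepd_page(hpd)  == (pte_t *)0xc000000001230000
 *	hugepd_shift(hpd) == 24
 *
 * i.e. the table pointer is stored with PD_HUGE stripped to flag the
 * slot as a hugepd, and the page shift rides in the low bits.
 */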

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte.  Just use the first one since they're all
	 * identical.  So for that case, idx=0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(*hpdp);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
#endif

	return dir + idx;
}
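
/*
 * Worked example of the index math above (hypothetical numbers): with
 * pdshift == 28 and hugepd_shift() == 24 (16M pages), the table holds
 * 1 << (28 - 24) == 16 hugeptes, and addr == 0x1b000000 gives
 *
 *	idx = (0x1b000000 & ((1UL << 28) - 1)) >> 24 == 0xb
 *
 * so entry 11 of the table is returned.
 */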

pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);
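
/*
 * Usage sketch (a minimal illustration; locking and error handling
 * elided, and the surrounding caller is hypothetical):
 *
 *	unsigned shift;
 *	pte_t *ptep = huge_pte_offset_and_shift(mm, addr, &shift);
 *	if (ptep && !huge_pte_none(huge_ptep_get(ptep)))
 *		size = 1UL << shift;
 *
 * where "size" is then the huge page size backing addr.
 */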

void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);
	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}
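
/*
 * Worked example, assuming a 16M hstate: huge_page_mask(h) is
 * ~(0x1000000UL - 1), so ~huge_page_mask(h) == 0xffffff.  An addr of
 * 0x11000000 with len 0x2000000 passes both checks; len 0x1800000
 * leaves low bits set and the request is rejected with -EINVAL.
 */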

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}
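
/*
 * Note on the helper above: the ~0UL clear mask wipes every PTE bit,
 * so the old PTE is returned and the entry is left pte_none().  The
 * argument lists differ because the 64-bit pte_update() takes a
 * trailing "huge" flag (here 1), while the 32-bit variant is just
 * (ptep, clr, set).
 */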

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;
	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry.  Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}
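
/*
 * For reference, the generic hugetlb fault path effectively does (a
 * sketch, not a verbatim quote of mm/hugetlb.c):
 *
 *	if (huge_ptep_set_access_flags(vma, addr, ptep, entry, dirty))
 *		update_mmu_cache(vma, addr, ptep);
 *
 * which is why unconditionally returning 1 above guarantees that the
 * TLB entry gets written on HUGETLB_NEED_PRELOAD platforms.
 */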

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif
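
/*
 * Illustrative call site (the surrounding function is hypothetical,
 * for exposition only): per the comment above, gpage reservation has
 * to run while memblock still owns boot memory, i.e. before the page
 * allocator is up.
 *
 *	void __init my_board_setup_arch(void)
 *	{
 *		reserve_hugetlb_gpages();
 *	}
 */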

#endif /* _ASM_POWERPC_HUGETLB_H */